diff --git a/agents/prompts/followup.md b/agents/prompts/followup.md new file mode 100644 index 0000000..8d2f395 --- /dev/null +++ b/agents/prompts/followup.md @@ -0,0 +1,35 @@ +You are a Project Manager reviewing completed pipeline results. + +Your job: analyze the output from all pipeline steps and create follow-up tasks. + +## Rules + +- Create one task per actionable item found in the pipeline output +- Group small related fixes into a single task when logical (e.g. "CORS + Helmet + CSP headers" = one task) +- Set priority based on severity: CRITICAL=1, HIGH=2, MEDIUM=4, LOW=6, INFO=8 +- Set type: "hotfix" for CRITICAL/HIGH security, "debug" for bugs, "feature" for improvements, "refactor" for cleanup +- Each task must have a clear, actionable title +- Include enough context in brief so the assigned specialist can start without re-reading the full audit +- Skip informational/already-done items — only create tasks for things that need action +- If no follow-ups are needed, return an empty array + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +[ + { + "title": "Добавить requireAuth на admin endpoints", + "type": "hotfix", + "priority": 2, + "brief": "3 admin-эндпоинта без auth: /api/admin/collect-hot-tours, /api/admin/refresh-hotel-details, /api/admin/hotel-stats. Добавить middleware requireAuth." + }, + { + "title": "Rate limiting на /api/auth/login", + "type": "feature", + "priority": 4, + "brief": "Эндпоинт login не имеет rate limiting. Добавить express-rate-limit: 5 попыток / 15 мин на IP." 
+ } +] +``` diff --git a/cli/main.py b/cli/main.py index e720020..aea81db 100644 --- a/cli/main.py +++ b/cli/main.py @@ -410,6 +410,46 @@ def cost(ctx, period): click.echo(f"\nTotal: ${total:.4f}") +# =========================================================================== +# approve +# =========================================================================== + +@cli.command("approve") +@click.argument("task_id") +@click.option("--followup", is_flag=True, help="Generate follow-up tasks from pipeline results") +@click.option("--decision", "decision_text", default=None, help="Record a decision with this text") +@click.pass_context +def approve_task(ctx, task_id, followup, decision_text): + """Approve a task (set status=done). Optionally generate follow-ups.""" + from core.followup import generate_followups + + conn = ctx.obj["conn"] + task = models.get_task(conn, task_id) + if not task: + click.echo(f"Task '{task_id}' not found.", err=True) + raise SystemExit(1) + + models.update_task(conn, task_id, status="done") + click.echo(f"Approved: {task_id} → done") + + if decision_text: + models.add_decision( + conn, task["project_id"], "decision", decision_text, decision_text, + task_id=task_id, + ) + click.echo("Decision recorded.") + + if followup: + click.echo("Generating follow-up tasks...") + created = generate_followups(conn, task_id) + if created: + click.echo(f"Created {len(created)} follow-up tasks:") + for t in created: + click.echo(f" {t['id']}: {t['title']} (pri {t['priority']})") + else: + click.echo("No follow-up tasks generated.") + + # =========================================================================== # run # =========================================================================== diff --git a/core/followup.py b/core/followup.py new file mode 100644 index 0000000..70abe21 --- /dev/null +++ b/core/followup.py @@ -0,0 +1,147 @@ +""" +Kin follow-up generator — analyzes pipeline output and creates follow-up tasks. 
+Runs a PM agent to parse results and produce actionable task list. +""" + +import json +import sqlite3 + +from core import models +from core.context_builder import format_prompt, PROMPTS_DIR + + +def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str: + """Collect all pipeline step outputs for a task into a single string.""" + rows = conn.execute( + """SELECT agent_role, output_summary, success + FROM agent_logs WHERE task_id = ? ORDER BY created_at""", + (task_id,), + ).fetchall() + if not rows: + return "" + parts = [] + for r in rows: + status = "OK" if r["success"] else "FAILED" + parts.append(f"=== {r['agent_role']} [{status}] ===") + parts.append(r["output_summary"] or "(no output)") + parts.append("") + return "\n".join(parts) + + +def _next_task_id(conn: sqlite3.Connection, project_id: str) -> str: + """Generate the next sequential task ID for a project.""" + prefix = project_id.upper() + existing = models.list_tasks(conn, project_id=project_id) + max_num = 0 + for t in existing: + tid = t["id"] + if tid.startswith(prefix + "-"): + try: + num = int(tid.split("-", 1)[1]) + max_num = max(max_num, num) + except ValueError: + pass + return f"{prefix}-{max_num + 1:03d}" + + +def generate_followups( + conn: sqlite3.Connection, + task_id: str, + dry_run: bool = False, +) -> list[dict]: + """Analyze pipeline output and create follow-up tasks. + + 1. Collects all agent_logs output for the task + 2. Runs followup agent (claude -p) to analyze and propose tasks + 3. Creates tasks in DB with parent_task_id = task_id + + Returns list of created task dicts. 
+ """ + task = models.get_task(conn, task_id) + if not task: + return [] + + project_id = task["project_id"] + project = models.get_project(conn, project_id) + if not project: + return [] + + pipeline_output = _collect_pipeline_output(conn, task_id) + if not pipeline_output: + return [] + + # Build context for followup agent + language = project.get("language", "ru") + context = { + "project": { + "id": project["id"], + "name": project["name"], + "path": project["path"], + "tech_stack": project.get("tech_stack"), + "language": language, + }, + "task": { + "id": task["id"], + "title": task["title"], + "status": task["status"], + "priority": task["priority"], + "brief": task.get("brief"), + "spec": task.get("spec"), + }, + "previous_output": pipeline_output, + } + + prompt = format_prompt(context, "followup") + + if dry_run: + return [{"_dry_run": True, "_prompt": prompt}] + + # Run followup agent + from agents.runner import _run_claude, _try_parse_json + + result = _run_claude(prompt, model="sonnet") + output = result.get("output", "") + + # Parse the task list from output + parsed = _try_parse_json(output) + if not isinstance(parsed, list): + # Maybe it's wrapped in a dict + if isinstance(parsed, dict): + parsed = parsed.get("tasks") or parsed.get("followups") or [] + else: + return [] + + # Create tasks in DB + created = [] + for item in parsed: + if not isinstance(item, dict) or "title" not in item: + continue + new_id = _next_task_id(conn, project_id) + brief = item.get("brief") + brief_dict = {"source": f"followup:{task_id}"} + if item.get("type"): + brief_dict["route_type"] = item["type"] + if brief: + brief_dict["description"] = brief + + t = models.create_task( + conn, new_id, project_id, + title=item["title"], + priority=item.get("priority", 5), + parent_task_id=task_id, + brief=brief_dict, + ) + created.append(t) + + # Log the followup generation + models.log_agent_run( + conn, project_id, "followup_pm", "generate_followups", + task_id=task_id, + 
output_summary=json.dumps( + [{"id": t["id"], "title": t["title"]} for t in created], + ensure_ascii=False, + ), + success=True, + ) + + return created diff --git a/tests/test_followup.py b/tests/test_followup.py new file mode 100644 index 0000000..bf27178 --- /dev/null +++ b/tests/test_followup.py @@ -0,0 +1,141 @@ +"""Tests for core/followup.py — follow-up task generation.""" + +import json +import pytest +from unittest.mock import patch, MagicMock + +from core.db import init_db +from core import models +from core.followup import generate_followups, _collect_pipeline_output, _next_task_id + + +@pytest.fixture +def conn(): + c = init_db(":memory:") + models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek", + tech_stack=["vue3"], language="ru") + models.create_task(c, "VDOL-001", "vdol", "Security audit", + status="done", brief={"route_type": "security_audit"}) + # Add some pipeline logs + models.log_agent_run(c, "vdol", "security", "execute", + task_id="VDOL-001", + output_summary=json.dumps({ + "summary": "8 уязвимостей найдено", + "findings": [ + {"severity": "HIGH", "title": "Admin endpoint без auth", + "file": "index.js", "line": 42}, + {"severity": "HIGH", "title": "SEO endpoints без auth", + "file": "index.js", "line": 88}, + {"severity": "MEDIUM", "title": "Нет rate limiting на login", + "file": "auth.js", "line": 15}, + ], + }, ensure_ascii=False), + success=True) + yield c + c.close() + + +class TestCollectPipelineOutput: + def test_collects_all_steps(self, conn): + output = _collect_pipeline_output(conn, "VDOL-001") + assert "security" in output + assert "Admin endpoint" in output + + def test_empty_for_no_logs(self, conn): + assert _collect_pipeline_output(conn, "NONEXISTENT") == "" + + +class TestNextTaskId: + def test_increments(self, conn): + assert _next_task_id(conn, "vdol") == "VDOL-002" + + def test_handles_obs_ids(self, conn): + # OBS tasks shouldn't interfere with numbering + models.create_task(conn, "VDOL-OBS-001", "vdol", 
"Obsidian task") + assert _next_task_id(conn, "vdol") == "VDOL-002" + + +class TestGenerateFollowups: + @patch("agents.runner._run_claude") + def test_creates_followup_tasks(self, mock_claude, conn): + mock_claude.return_value = { + "output": json.dumps([ + {"title": "Fix admin auth", "type": "hotfix", "priority": 2, + "brief": "Add requireAuth to admin endpoints"}, + {"title": "Add rate limiting", "type": "feature", "priority": 4, + "brief": "Rate limit login to 5/15min"}, + ]), + "returncode": 0, + } + + created = generate_followups(conn, "VDOL-001") + + assert len(created) == 2 + assert created[0]["id"] == "VDOL-002" + assert created[1]["id"] == "VDOL-003" + assert created[0]["title"] == "Fix admin auth" + assert created[0]["parent_task_id"] == "VDOL-001" + assert created[0]["priority"] == 2 + assert created[1]["parent_task_id"] == "VDOL-001" + + # Brief should contain source reference + assert created[0]["brief"]["source"] == "followup:VDOL-001" + assert created[0]["brief"]["route_type"] == "hotfix" + + @patch("agents.runner._run_claude") + def test_handles_empty_response(self, mock_claude, conn): + mock_claude.return_value = {"output": "[]", "returncode": 0} + assert generate_followups(conn, "VDOL-001") == [] + + @patch("agents.runner._run_claude") + def test_handles_wrapped_response(self, mock_claude, conn): + """PM might return {tasks: [...]} instead of bare array.""" + mock_claude.return_value = { + "output": json.dumps({"tasks": [ + {"title": "Fix X", "priority": 3}, + ]}), + "returncode": 0, + } + created = generate_followups(conn, "VDOL-001") + assert len(created) == 1 + + @patch("agents.runner._run_claude") + def test_handles_invalid_json(self, mock_claude, conn): + mock_claude.return_value = {"output": "not json", "returncode": 0} + assert generate_followups(conn, "VDOL-001") == [] + + def test_no_logs_returns_empty(self, conn): + models.create_task(conn, "VDOL-999", "vdol", "Empty task") + assert generate_followups(conn, "VDOL-999") == [] + + def 
test_nonexistent_task(self, conn): + assert generate_followups(conn, "NOPE") == [] + + def test_dry_run(self, conn): + result = generate_followups(conn, "VDOL-001", dry_run=True) + assert len(result) == 1 + assert result[0]["_dry_run"] is True + assert "followup" in result[0]["_prompt"].lower() or "Previous step output" in result[0]["_prompt"] + + @patch("agents.runner._run_claude") + def test_logs_generation(self, mock_claude, conn): + mock_claude.return_value = { + "output": json.dumps([{"title": "Fix A", "priority": 2}]), + "returncode": 0, + } + generate_followups(conn, "VDOL-001") + + logs = conn.execute( + "SELECT * FROM agent_logs WHERE agent_role='followup_pm'" + ).fetchall() + assert len(logs) == 1 + assert logs[0]["task_id"] == "VDOL-001" + + @patch("agents.runner._run_claude") + def test_prompt_includes_language(self, mock_claude, conn): + """Followup prompt should include language instruction.""" + mock_claude.return_value = {"output": "[]", "returncode": 0} + generate_followups(conn, "VDOL-001") + + prompt = mock_claude.call_args[0][0] + assert "Russian" in prompt diff --git a/web/api.py b/web/api.py index 2f131c4..21064b5 100644 --- a/web/api.py +++ b/web/api.py @@ -181,11 +181,14 @@ class TaskApprove(BaseModel): decision_title: str | None = None decision_description: str | None = None decision_type: str = "decision" + create_followups: bool = False @app.post("/api/tasks/{task_id}/approve") def approve_task(task_id: str, body: TaskApprove | None = None): - """Approve a task: set status=done, optionally add a decision.""" + """Approve a task: set status=done, optionally add decision and create follow-ups.""" + from core.followup import generate_followups + conn = get_conn() t = models.get_task(conn, task_id) if not t: @@ -199,8 +202,15 @@ def approve_task(task_id: str, body: TaskApprove | None = None): body.decision_title, body.decision_description or body.decision_title, task_id=task_id, ) + followup_tasks = [] + if body and body.create_followups: + 
followup_tasks = generate_followups(conn, task_id) conn.close() - return {"status": "done", "decision": decision} + return { + "status": "done", + "decision": decision, + "followup_tasks": followup_tasks, + } class TaskReject(BaseModel): diff --git a/web/frontend/src/api.ts b/web/frontend/src/api.ts index fd6ed2b..f4aa92b 100644 --- a/web/frontend/src/api.ts +++ b/web/frontend/src/api.ts @@ -112,8 +112,8 @@ export const api = { post('/projects', data), createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string }) => post('/tasks', data), - approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string }) => - post<{ status: string }>(`/tasks/${id}/approve`, data || {}), + approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string; create_followups?: boolean }) => + post<{ status: string; followup_tasks: Task[] }>(`/tasks/${id}/approve`, data || {}), rejectTask: (id: string, reason: string) => post<{ status: string }>(`/tasks/${id}/reject`, { reason }), runTask: (id: string) => diff --git a/web/frontend/src/views/ProjectView.vue b/web/frontend/src/views/ProjectView.vue index 3fed2e6..c20a99b 100644 --- a/web/frontend/src/views/ProjectView.vue +++ b/web/frontend/src/views/ProjectView.vue @@ -197,6 +197,7 @@ async function addDecision() { {{ t.id }} {{ t.title }} + from {{ t.parent_task_id }}
{{ t.assigned_role }} diff --git a/web/frontend/src/views/TaskDetail.vue b/web/frontend/src/views/TaskDetail.vue index b8a4467..8db2768 100644 --- a/web/frontend/src/views/TaskDetail.vue +++ b/web/frontend/src/views/TaskDetail.vue @@ -15,7 +15,9 @@ let pollTimer: ReturnType | null = null // Approve modal const showApprove = ref(false) -const approveForm = ref({ title: '', description: '', type: 'decision' }) +const approveForm = ref({ title: '', description: '', type: 'decision', createFollowups: true }) +const approveLoading = ref(false) +const followupResults = ref<{ id: string; title: string }[]>([]) // Reject modal const showReject = ref(false) @@ -74,16 +76,29 @@ function formatOutput(text: string | null): string { async function approve() { if (!task.value) return + approveLoading.value = true + followupResults.value = [] try { - const data = approveForm.value.title - ? { decision_title: approveForm.value.title, decision_description: approveForm.value.description, decision_type: approveForm.value.type } - : undefined - await api.approveTask(props.id, data) - showApprove.value = false - approveForm.value = { title: '', description: '', type: 'decision' } + const data: Record = { + create_followups: approveForm.value.createFollowups, + } + if (approveForm.value.title) { + data.decision_title = approveForm.value.title + data.decision_description = approveForm.value.description + data.decision_type = approveForm.value.type + } + const res = await api.approveTask(props.id, data as any) + if (res.followup_tasks?.length) { + followupResults.value = res.followup_tasks.map(t => ({ id: t.id, title: t.title })) + } else { + showApprove.value = false + } + approveForm.value = { title: '', description: '', type: 'decision', createFollowups: true } await load() } catch (e: any) { error.value = e.message + } finally { + approveLoading.value = false } } @@ -227,16 +242,36 @@ const hasSteps = computed(() => (task.value?.pipeline_steps?.length ?? 0) > 0)
- -
-

Optionally record a decision from this task:

+ + +
+

Task approved. Created {{ followupResults.length }} follow-up tasks:

+
+ + {{ f.id }} {{ f.title }} + +
+ +
+ + + +

Optionally record a decision:

-