Add permission-aware follow-up flow with interactive resolution

When follow-up agent detects permission-blocked items ("ручное
применение", "permission denied", etc.), they become pending_actions
instead of auto-created tasks. User chooses per item:
  1. Rerun with --dangerously-skip-permissions
  2. Create manual task
  3. Skip

core/followup.py:
  _is_permission_blocked() — regex detection of 9 permission patterns
  generate_followups() returns {created, pending_actions}
  resolve_pending_action() — handles rerun/manual_task/skip

agents/runner.py:
  _run_claude(allow_write=True) adds --dangerously-skip-permissions
  run_agent/run_pipeline pass allow_write through

CLI: kin approve --followup — interactive 1/2/3 prompt per blocked item
API: POST /approve returns {needs_decision, pending_actions}
     POST /resolve resolves individual actions
Frontend: pending actions shown as cards with 3 buttons in approve modal

136 tests, all passing. Frontend builds clean.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
johnfrum1234 2026-03-15 15:16:48 +02:00
parent 9264415776
commit ab693d3c4d
7 changed files with 356 additions and 73 deletions

View file

@ -23,6 +23,7 @@ def run_agent(
previous_output: str | None = None, previous_output: str | None = None,
brief_override: str | None = None, brief_override: str | None = None,
dry_run: bool = False, dry_run: bool = False,
allow_write: bool = False,
) -> dict: ) -> dict:
"""Run a single Claude Code agent as a subprocess. """Run a single Claude Code agent as a subprocess.
@ -62,7 +63,8 @@ def run_agent(
# Run claude subprocess # Run claude subprocess
start = time.monotonic() start = time.monotonic()
result = _run_claude(prompt, model=model, working_dir=working_dir) result = _run_claude(prompt, model=model, working_dir=working_dir,
allow_write=allow_write)
duration = int(time.monotonic() - start) duration = int(time.monotonic() - start)
# Parse output — ensure output_text is always a string for DB storage # Parse output — ensure output_text is always a string for DB storage
@ -106,6 +108,7 @@ def _run_claude(
prompt: str, prompt: str,
model: str = "sonnet", model: str = "sonnet",
working_dir: str | None = None, working_dir: str | None = None,
allow_write: bool = False,
) -> dict: ) -> dict:
"""Execute claude CLI as subprocess. Returns dict with output, returncode, etc.""" """Execute claude CLI as subprocess. Returns dict with output, returncode, etc."""
cmd = [ cmd = [
@ -114,6 +117,8 @@ def _run_claude(
"--output-format", "json", "--output-format", "json",
"--model", model, "--model", model,
] ]
if allow_write:
cmd.append("--dangerously-skip-permissions")
try: try:
proc = subprocess.run( proc = subprocess.run(
@ -207,6 +212,7 @@ def run_pipeline(
task_id: str, task_id: str,
steps: list[dict], steps: list[dict],
dry_run: bool = False, dry_run: bool = False,
allow_write: bool = False,
) -> dict: ) -> dict:
"""Execute a multi-step pipeline of agents. """Execute a multi-step pipeline of agents.
@ -253,6 +259,7 @@ def run_pipeline(
previous_output=previous_output, previous_output=previous_output,
brief_override=brief, brief_override=brief,
dry_run=dry_run, dry_run=dry_run,
allow_write=allow_write,
) )
results.append(result) results.append(result)

View file

@ -421,7 +421,7 @@ def cost(ctx, period):
@click.pass_context @click.pass_context
def approve_task(ctx, task_id, followup, decision_text): def approve_task(ctx, task_id, followup, decision_text):
"""Approve a task (set status=done). Optionally generate follow-ups.""" """Approve a task (set status=done). Optionally generate follow-ups."""
from core.followup import generate_followups from core.followup import generate_followups, resolve_pending_action
conn = ctx.obj["conn"] conn = ctx.obj["conn"]
task = models.get_task(conn, task_id) task = models.get_task(conn, task_id)
@ -441,12 +441,36 @@ def approve_task(ctx, task_id, followup, decision_text):
if followup: if followup:
click.echo("Generating follow-up tasks...") click.echo("Generating follow-up tasks...")
created = generate_followups(conn, task_id) result = generate_followups(conn, task_id)
created = result["created"]
pending = result["pending_actions"]
if created: if created:
click.echo(f"Created {len(created)} follow-up tasks:") click.echo(f"Created {len(created)} follow-up tasks:")
for t in created: for t in created:
click.echo(f" {t['id']}: {t['title']} (pri {t['priority']})") click.echo(f" {t['id']}: {t['title']} (pri {t['priority']})")
else:
for action in pending:
click.echo(f"\nPermission issue: {action['description']}")
click.echo(" 1. Rerun with --dangerously-skip-permissions")
click.echo(" 2. Create task for manual fix")
click.echo(" 3. Skip")
choice_input = click.prompt("Choice", type=click.Choice(["1", "2", "3"]), default="2")
choice_map = {"1": "rerun", "2": "manual_task", "3": "skip"}
choice = choice_map[choice_input]
result = resolve_pending_action(conn, task_id, action, choice)
if choice == "rerun" and result:
rr = result.get("rerun_result", {})
if rr.get("success"):
click.echo(" Re-run completed successfully.")
else:
click.echo(f" Re-run failed: {rr.get('error', 'unknown')}")
elif choice == "manual_task" and result:
click.echo(f" Created: {result['id']}: {result['title']}")
elif choice == "skip":
click.echo(" Skipped.")
if not created and not pending:
click.echo("No follow-up tasks generated.") click.echo("No follow-up tasks generated.")

View file

@ -1,14 +1,34 @@
""" """
Kin follow-up generator analyzes pipeline output and creates follow-up tasks. Kin follow-up generator analyzes pipeline output and creates follow-up tasks.
Runs a PM agent to parse results and produce actionable task list. Runs a PM agent to parse results and produce actionable task list.
Detects permission-blocked items and returns them as pending actions.
""" """
import json import json
import re
import sqlite3 import sqlite3
from core import models from core import models
from core.context_builder import format_prompt, PROMPTS_DIR from core.context_builder import format_prompt, PROMPTS_DIR
_PERMISSION_PATTERNS = [
r"(?i)permission\s+denied",
r"(?i)ручное\s+применение",
r"(?i)не\s+получил[иа]?\s+разрешени[ея]",
r"(?i)cannot\s+write",
r"(?i)read[- ]?only",
r"(?i)нет\s+прав\s+на\s+запись",
r"(?i)manually\s+appl",
r"(?i)apply\s+manually",
r"(?i)требуется\s+ручн",
]
def _is_permission_blocked(item: dict) -> bool:
"""Check if a follow-up item describes a permission/write failure."""
text = f"{item.get('title', '')} {item.get('brief', '')}".lower()
return any(re.search(p, text) for p in _PERMISSION_PATTERNS)
def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str: def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str:
"""Collect all pipeline step outputs for a task into a single string.""" """Collect all pipeline step outputs for a task into a single string."""
@ -48,27 +68,35 @@ def generate_followups(
conn: sqlite3.Connection, conn: sqlite3.Connection,
task_id: str, task_id: str,
dry_run: bool = False, dry_run: bool = False,
) -> list[dict]: ) -> dict:
"""Analyze pipeline output and create follow-up tasks. """Analyze pipeline output and create follow-up tasks.
1. Collects all agent_logs output for the task Returns dict:
2. Runs followup agent (claude -p) to analyze and propose tasks {
3. Creates tasks in DB with parent_task_id = task_id "created": [task, ...], # tasks created immediately
"pending_actions": [action, ...], # items needing user decision
}
Returns list of created task dicts. A pending_action looks like:
{
"type": "permission_fix",
"description": "...",
"original_item": {...}, # raw item from PM
"options": ["rerun", "manual_task", "skip"],
}
""" """
task = models.get_task(conn, task_id) task = models.get_task(conn, task_id)
if not task: if not task:
return [] return {"created": [], "pending_actions": []}
project_id = task["project_id"] project_id = task["project_id"]
project = models.get_project(conn, project_id) project = models.get_project(conn, project_id)
if not project: if not project:
return [] return {"created": [], "pending_actions": []}
pipeline_output = _collect_pipeline_output(conn, task_id) pipeline_output = _collect_pipeline_output(conn, task_id)
if not pipeline_output: if not pipeline_output:
return [] return {"created": [], "pending_actions": []}
# Build context for followup agent # Build context for followup agent
language = project.get("language", "ru") language = project.get("language", "ru")
@ -94,7 +122,7 @@ def generate_followups(
prompt = format_prompt(context, "followup") prompt = format_prompt(context, "followup")
if dry_run: if dry_run:
return [{"_dry_run": True, "_prompt": prompt}] return {"created": [{"_dry_run": True, "_prompt": prompt}], "pending_actions": []}
# Run followup agent # Run followup agent
from agents.runner import _run_claude, _try_parse_json from agents.runner import _run_claude, _try_parse_json
@ -105,43 +133,100 @@ def generate_followups(
# Parse the task list from output # Parse the task list from output
parsed = _try_parse_json(output) parsed = _try_parse_json(output)
if not isinstance(parsed, list): if not isinstance(parsed, list):
# Maybe it's wrapped in a dict
if isinstance(parsed, dict): if isinstance(parsed, dict):
parsed = parsed.get("tasks") or parsed.get("followups") or [] parsed = parsed.get("tasks") or parsed.get("followups") or []
else: else:
return [] return {"created": [], "pending_actions": []}
# Create tasks in DB # Separate permission-blocked items from normal ones
created = [] created = []
pending_actions = []
for item in parsed: for item in parsed:
if not isinstance(item, dict) or "title" not in item: if not isinstance(item, dict) or "title" not in item:
continue continue
new_id = _next_task_id(conn, project_id)
brief = item.get("brief")
brief_dict = {"source": f"followup:{task_id}"}
if item.get("type"):
brief_dict["route_type"] = item["type"]
if brief:
brief_dict["description"] = brief
t = models.create_task( if _is_permission_blocked(item):
conn, new_id, project_id, pending_actions.append({
title=item["title"], "type": "permission_fix",
priority=item.get("priority", 5), "description": item["title"],
parent_task_id=task_id, "original_item": item,
brief=brief_dict, "options": ["rerun", "manual_task", "skip"],
) })
created.append(t) else:
new_id = _next_task_id(conn, project_id)
brief_dict = {"source": f"followup:{task_id}"}
if item.get("type"):
brief_dict["route_type"] = item["type"]
if item.get("brief"):
brief_dict["description"] = item["brief"]
t = models.create_task(
conn, new_id, project_id,
title=item["title"],
priority=item.get("priority", 5),
parent_task_id=task_id,
brief=brief_dict,
)
created.append(t)
# Log the followup generation # Log the followup generation
models.log_agent_run( models.log_agent_run(
conn, project_id, "followup_pm", "generate_followups", conn, project_id, "followup_pm", "generate_followups",
task_id=task_id, task_id=task_id,
output_summary=json.dumps( output_summary=json.dumps({
[{"id": t["id"], "title": t["title"]} for t in created], "created": [{"id": t["id"], "title": t["title"]} for t in created],
ensure_ascii=False, "pending": len(pending_actions),
), }, ensure_ascii=False),
success=True, success=True,
) )
return created return {"created": created, "pending_actions": pending_actions}
def resolve_pending_action(
    conn: sqlite3.Connection,
    task_id: str,
    action: dict,
    choice: str,
) -> dict | None:
    """Resolve a single pending permission action.

    Args:
        conn: Open SQLite connection.
        task_id: Parent task whose follow-up generation produced this action.
        action: Pending-action dict; ``original_item`` carries the raw PM item.
        choice: One of ``"rerun"``, ``"manual_task"`` or ``"skip"``.

    Returns:
        - ``"manual_task"``: the created task dict.
        - ``"rerun"``: ``{"rerun_result": <run_pipeline result>}``.
        - ``"skip"``, an unknown choice, or a missing task: ``None``.
    """
    task = models.get_task(conn, task_id)
    if not task:
        return None
    project_id = task["project_id"]
    item = action.get("original_item", {})

    if choice == "manual_task":
        new_id = _next_task_id(conn, project_id)
        brief_dict = {"source": f"followup:{task_id}"}
        if item.get("type"):
            brief_dict["route_type"] = item["type"]
        if item.get("brief"):
            brief_dict["description"] = item["brief"]
        return models.create_task(
            conn, new_id, project_id,
            title=item.get("title", "Manual fix required"),
            priority=item.get("priority", 5),
            parent_task_id=task_id,
            brief=brief_dict,
        )

    if choice == "rerun":
        # Re-run a single pipeline step for the parent task with write access;
        # allow_write=True makes the runner add --dangerously-skip-permissions.
        from agents.runner import run_pipeline
        steps = [{
            "role": item.get("type", "frontend_dev"),
            "brief": item.get("brief", item.get("title", "")),
            "model": "sonnet",
        }]
        result = run_pipeline(conn, task_id, steps, allow_write=True)
        return {"rerun_result": result}

    # "skip" and any unrecognized choice intentionally fall through to None.
    return None

View file

@ -1,4 +1,4 @@
"""Tests for core/followup.py — follow-up task generation.""" """Tests for core/followup.py — follow-up task generation with permission handling."""
import json import json
import pytest import pytest
@ -6,7 +6,10 @@ from unittest.mock import patch, MagicMock
from core.db import init_db from core.db import init_db
from core import models from core import models
from core.followup import generate_followups, _collect_pipeline_output, _next_task_id from core.followup import (
generate_followups, resolve_pending_action,
_collect_pipeline_output, _next_task_id, _is_permission_blocked,
)
@pytest.fixture @pytest.fixture
@ -16,7 +19,6 @@ def conn():
tech_stack=["vue3"], language="ru") tech_stack=["vue3"], language="ru")
models.create_task(c, "VDOL-001", "vdol", "Security audit", models.create_task(c, "VDOL-001", "vdol", "Security audit",
status="done", brief={"route_type": "security_audit"}) status="done", brief={"route_type": "security_audit"})
# Add some pipeline logs
models.log_agent_run(c, "vdol", "security", "execute", models.log_agent_run(c, "vdol", "security", "execute",
task_id="VDOL-001", task_id="VDOL-001",
output_summary=json.dumps({ output_summary=json.dumps({
@ -24,8 +26,6 @@ def conn():
"findings": [ "findings": [
{"severity": "HIGH", "title": "Admin endpoint без auth", {"severity": "HIGH", "title": "Admin endpoint без auth",
"file": "index.js", "line": 42}, "file": "index.js", "line": 42},
{"severity": "HIGH", "title": "SEO endpoints без auth",
"file": "index.js", "line": 88},
{"severity": "MEDIUM", "title": "Нет rate limiting на login", {"severity": "MEDIUM", "title": "Нет rate limiting на login",
"file": "auth.js", "line": 15}, "file": "auth.js", "line": 15},
], ],
@ -50,11 +50,30 @@ class TestNextTaskId:
assert _next_task_id(conn, "vdol") == "VDOL-002" assert _next_task_id(conn, "vdol") == "VDOL-002"
def test_handles_obs_ids(self, conn): def test_handles_obs_ids(self, conn):
# OBS tasks shouldn't interfere with numbering
models.create_task(conn, "VDOL-OBS-001", "vdol", "Obsidian task") models.create_task(conn, "VDOL-OBS-001", "vdol", "Obsidian task")
assert _next_task_id(conn, "vdol") == "VDOL-002" assert _next_task_id(conn, "vdol") == "VDOL-002"
class TestIsPermissionBlocked:
    """Unit tests for the permission-blocked detector (English + Russian)."""

    def test_detects_permission_denied(self):
        assert _is_permission_blocked({"title": "Fix X", "brief": "permission denied on write"})

    def test_detects_manual_application_ru(self):
        # Russian "manual application" phrasing ("ручное применение")
        assert _is_permission_blocked({"title": "Ручное применение фикса для auth.js"})

    def test_detects_no_write_permission_ru(self):
        # Russian "did not get write permission" phrasing
        assert _is_permission_blocked({"title": "X", "brief": "не получили разрешение на запись"})

    def test_detects_read_only(self):
        assert _is_permission_blocked({"title": "Apply manually", "brief": "file is read-only"})

    def test_normal_item_not_blocked(self):
        # Ordinary actionable items must not be routed to pending_actions
        assert not _is_permission_blocked({"title": "Fix admin auth", "brief": "Add requireAuth"})

    def test_empty_item(self):
        # Missing title/brief must be treated as not blocked, not as an error
        assert not _is_permission_blocked({})
class TestGenerateFollowups: class TestGenerateFollowups:
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_creates_followup_tasks(self, mock_claude, conn): def test_creates_followup_tasks(self, mock_claude, conn):
@ -68,54 +87,75 @@ class TestGenerateFollowups:
"returncode": 0, "returncode": 0,
} }
created = generate_followups(conn, "VDOL-001") result = generate_followups(conn, "VDOL-001")
assert len(created) == 2 assert len(result["created"]) == 2
assert created[0]["id"] == "VDOL-002" assert len(result["pending_actions"]) == 0
assert created[1]["id"] == "VDOL-003" assert result["created"][0]["id"] == "VDOL-002"
assert created[0]["title"] == "Fix admin auth" assert result["created"][0]["parent_task_id"] == "VDOL-001"
assert created[0]["parent_task_id"] == "VDOL-001"
assert created[0]["priority"] == 2
assert created[1]["parent_task_id"] == "VDOL-001"
# Brief should contain source reference @patch("agents.runner._run_claude")
assert created[0]["brief"]["source"] == "followup:VDOL-001" def test_separates_permission_items(self, mock_claude, conn):
assert created[0]["brief"]["route_type"] == "hotfix" mock_claude.return_value = {
"output": json.dumps([
{"title": "Fix admin auth", "type": "hotfix", "priority": 2,
"brief": "Add requireAuth"},
{"title": "Ручное применение .dockerignore",
"type": "hotfix", "priority": 3,
"brief": "Не получили разрешение на запись в файл"},
{"title": "Apply CSP headers manually",
"type": "feature", "priority": 4,
"brief": "Permission denied writing nginx.conf"},
]),
"returncode": 0,
}
result = generate_followups(conn, "VDOL-001")
assert len(result["created"]) == 1 # Only "Fix admin auth"
assert result["created"][0]["title"] == "Fix admin auth"
assert len(result["pending_actions"]) == 2
assert result["pending_actions"][0]["type"] == "permission_fix"
assert "options" in result["pending_actions"][0]
assert "rerun" in result["pending_actions"][0]["options"]
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_handles_empty_response(self, mock_claude, conn): def test_handles_empty_response(self, mock_claude, conn):
mock_claude.return_value = {"output": "[]", "returncode": 0} mock_claude.return_value = {"output": "[]", "returncode": 0}
assert generate_followups(conn, "VDOL-001") == [] result = generate_followups(conn, "VDOL-001")
assert result["created"] == []
assert result["pending_actions"] == []
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_handles_wrapped_response(self, mock_claude, conn): def test_handles_wrapped_response(self, mock_claude, conn):
"""PM might return {tasks: [...]} instead of bare array."""
mock_claude.return_value = { mock_claude.return_value = {
"output": json.dumps({"tasks": [ "output": json.dumps({"tasks": [
{"title": "Fix X", "priority": 3}, {"title": "Fix X", "priority": 3},
]}), ]}),
"returncode": 0, "returncode": 0,
} }
created = generate_followups(conn, "VDOL-001") result = generate_followups(conn, "VDOL-001")
assert len(created) == 1 assert len(result["created"]) == 1
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_handles_invalid_json(self, mock_claude, conn): def test_handles_invalid_json(self, mock_claude, conn):
mock_claude.return_value = {"output": "not json", "returncode": 0} mock_claude.return_value = {"output": "not json", "returncode": 0}
assert generate_followups(conn, "VDOL-001") == [] result = generate_followups(conn, "VDOL-001")
assert result["created"] == []
def test_no_logs_returns_empty(self, conn): def test_no_logs_returns_empty(self, conn):
models.create_task(conn, "VDOL-999", "vdol", "Empty task") models.create_task(conn, "VDOL-999", "vdol", "Empty task")
assert generate_followups(conn, "VDOL-999") == [] result = generate_followups(conn, "VDOL-999")
assert result["created"] == []
def test_nonexistent_task(self, conn): def test_nonexistent_task(self, conn):
assert generate_followups(conn, "NOPE") == [] result = generate_followups(conn, "NOPE")
assert result["created"] == []
def test_dry_run(self, conn): def test_dry_run(self, conn):
result = generate_followups(conn, "VDOL-001", dry_run=True) result = generate_followups(conn, "VDOL-001", dry_run=True)
assert len(result) == 1 assert len(result["created"]) == 1
assert result[0]["_dry_run"] is True assert result["created"][0]["_dry_run"] is True
assert "followup" in result[0]["_prompt"].lower() or "Previous step output" in result[0]["_prompt"]
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_logs_generation(self, mock_claude, conn): def test_logs_generation(self, mock_claude, conn):
@ -129,13 +169,56 @@ class TestGenerateFollowups:
"SELECT * FROM agent_logs WHERE agent_role='followup_pm'" "SELECT * FROM agent_logs WHERE agent_role='followup_pm'"
).fetchall() ).fetchall()
assert len(logs) == 1 assert len(logs) == 1
assert logs[0]["task_id"] == "VDOL-001"
@patch("agents.runner._run_claude") @patch("agents.runner._run_claude")
def test_prompt_includes_language(self, mock_claude, conn): def test_prompt_includes_language(self, mock_claude, conn):
"""Followup prompt should include language instruction."""
mock_claude.return_value = {"output": "[]", "returncode": 0} mock_claude.return_value = {"output": "[]", "returncode": 0}
generate_followups(conn, "VDOL-001") generate_followups(conn, "VDOL-001")
prompt = mock_claude.call_args[0][0] prompt = mock_claude.call_args[0][0]
assert "Russian" in prompt assert "Russian" in prompt
class TestResolvePendingAction:
    """Tests for resolve_pending_action() — rerun / manual_task / skip paths."""

    def test_skip_returns_none(self, conn):
        action = {"type": "permission_fix", "original_item": {"title": "X"}}
        assert resolve_pending_action(conn, "VDOL-001", action, "skip") is None

    def test_manual_task_creates_task(self, conn):
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix .dockerignore", "type": "hotfix",
                              "priority": 3, "brief": "Create .dockerignore"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result is not None
        assert result["title"] == "Fix .dockerignore"
        assert result["parent_task_id"] == "VDOL-001"
        assert result["priority"] == 3

    @patch("agents.runner._run_claude")
    def test_rerun_launches_pipeline(self, mock_claude, conn):
        # _run_claude is mocked here, so the --dangerously-skip-permissions
        # flag itself is not observable in this test; we only verify that the
        # pipeline ran and reported success. The allow_write -> CLI-flag wiring
        # is covered by the runner-level tests.
        mock_claude.return_value = {
            "output": json.dumps({"result": "applied fix"}),
            "returncode": 0,
        }
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix X", "type": "frontend_dev",
                              "brief": "Apply the fix"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "rerun")
        assert "rerun_result" in result
        assert result["rerun_result"]["success"] is True

    def test_nonexistent_task(self, conn):
        action = {"type": "permission_fix", "original_item": {}}
        assert resolve_pending_action(conn, "NOPE", action, "skip") is None

View file

@ -203,16 +203,43 @@ def approve_task(task_id: str, body: TaskApprove | None = None):
task_id=task_id, task_id=task_id,
) )
followup_tasks = [] followup_tasks = []
pending_actions = []
if body and body.create_followups: if body and body.create_followups:
followup_tasks = generate_followups(conn, task_id) result = generate_followups(conn, task_id)
followup_tasks = result["created"]
pending_actions = result["pending_actions"]
conn.close() conn.close()
return { return {
"status": "done", "status": "done",
"decision": decision, "decision": decision,
"followup_tasks": followup_tasks, "followup_tasks": followup_tasks,
"needs_decision": len(pending_actions) > 0,
"pending_actions": pending_actions,
} }
class ResolveAction(BaseModel):
    """Request body for POST /api/tasks/{task_id}/resolve."""
    # Raw pending-action dict as returned by the /approve endpoint
    action: dict
    choice: str  # "rerun" | "manual_task" | "skip"
@app.post("/api/tasks/{task_id}/resolve")
def resolve_action(task_id: str, body: ResolveAction):
    """Resolve a pending permission action from follow-up generation.

    Returns ``{"choice": ..., "result": ...}`` where ``result`` is the created
    task (manual_task), a rerun summary (rerun), or None (skip).

    Raises:
        HTTPException 400: unknown choice value.
        HTTPException 404: task does not exist.
    """
    from core.followup import resolve_pending_action
    if body.choice not in ("rerun", "manual_task", "skip"):
        raise HTTPException(400, f"Invalid choice: {body.choice}")
    conn = get_conn()
    try:
        t = models.get_task(conn, task_id)
        if not t:
            raise HTTPException(404, f"Task '{task_id}' not found")
        result = resolve_pending_action(conn, task_id, body.action, body.choice)
    finally:
        # Always release the connection — the original leaked it when
        # resolve_pending_action raised (conn.close() was only on the happy path).
        conn.close()
    return {"choice": body.choice, "result": result}
class TaskReject(BaseModel): class TaskReject(BaseModel):
reason: str reason: str

View file

@ -92,6 +92,13 @@ export interface TaskFull extends Task {
related_decisions: Decision[] related_decisions: Decision[]
} }
/** A follow-up item blocked by permissions that needs a user decision. */
export interface PendingAction {
  /** Action kind, e.g. "permission_fix" */
  type: string
  /** Human-readable summary shown in the approve modal */
  description: string
  /** Raw follow-up item as produced by the PM agent */
  original_item: Record<string, unknown>
  /** Allowed resolutions: "rerun" | "manual_task" | "skip" */
  options: string[]
}
export interface CostEntry { export interface CostEntry {
project_id: string project_id: string
project_name: string project_name: string
@ -113,7 +120,9 @@ export const api = {
createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string }) => createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string }) =>
post<Task>('/tasks', data), post<Task>('/tasks', data),
approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string; create_followups?: boolean }) => approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string; create_followups?: boolean }) =>
post<{ status: string; followup_tasks: Task[] }>(`/tasks/${id}/approve`, data || {}), post<{ status: string; followup_tasks: Task[]; needs_decision: boolean; pending_actions: PendingAction[] }>(`/tasks/${id}/approve`, data || {}),
resolveAction: (id: string, action: PendingAction, choice: string) =>
post<{ choice: string; result: unknown }>(`/tasks/${id}/resolve`, { action, choice }),
rejectTask: (id: string, reason: string) => rejectTask: (id: string, reason: string) =>
post<{ status: string }>(`/tasks/${id}/reject`, { reason }), post<{ status: string }>(`/tasks/${id}/reject`, { reason }),
runTask: (id: string) => runTask: (id: string) =>

View file

@ -1,6 +1,6 @@
<script setup lang="ts"> <script setup lang="ts">
import { ref, onMounted, computed } from 'vue' import { ref, onMounted, computed } from 'vue'
import { api, type TaskFull, type PipelineStep } from '../api' import { api, type TaskFull, type PipelineStep, type PendingAction } from '../api'
import Badge from '../components/Badge.vue' import Badge from '../components/Badge.vue'
import Modal from '../components/Modal.vue' import Modal from '../components/Modal.vue'
@ -18,6 +18,8 @@ const showApprove = ref(false)
const approveForm = ref({ title: '', description: '', type: 'decision', createFollowups: true }) const approveForm = ref({ title: '', description: '', type: 'decision', createFollowups: true })
const approveLoading = ref(false) const approveLoading = ref(false)
const followupResults = ref<{ id: string; title: string }[]>([]) const followupResults = ref<{ id: string; title: string }[]>([])
const pendingActions = ref<PendingAction[]>([])
const resolvingAction = ref(false)
// Reject modal // Reject modal
const showReject = ref(false) const showReject = ref(false)
@ -78,6 +80,7 @@ async function approve() {
if (!task.value) return if (!task.value) return
approveLoading.value = true approveLoading.value = true
followupResults.value = [] followupResults.value = []
pendingActions.value = []
try { try {
const data: Record<string, unknown> = { const data: Record<string, unknown> = {
create_followups: approveForm.value.createFollowups, create_followups: approveForm.value.createFollowups,
@ -90,7 +93,11 @@ async function approve() {
const res = await api.approveTask(props.id, data as any) const res = await api.approveTask(props.id, data as any)
if (res.followup_tasks?.length) { if (res.followup_tasks?.length) {
followupResults.value = res.followup_tasks.map(t => ({ id: t.id, title: t.title })) followupResults.value = res.followup_tasks.map(t => ({ id: t.id, title: t.title }))
} else { }
if (res.pending_actions?.length) {
pendingActions.value = res.pending_actions
}
if (!res.followup_tasks?.length && !res.pending_actions?.length) {
showApprove.value = false showApprove.value = false
} }
approveForm.value = { title: '', description: '', type: 'decision', createFollowups: true } approveForm.value = { title: '', description: '', type: 'decision', createFollowups: true }
@ -102,6 +109,25 @@ async function approve() {
} }
} }
/**
 * Resolve one pending permission action via the API, then update local state:
 * drop the action from the list, record a created task, or reload after rerun.
 */
async function resolveAction(action: PendingAction, choice: string) {
  resolvingAction.value = true
  try {
    const res = await api.resolveAction(props.id, action, choice)
    // Identity compare is safe: `action` came out of this same array.
    pendingActions.value = pendingActions.value.filter(a => a !== action)
    if (choice === 'manual_task' && res.result && typeof res.result === 'object' && 'id' in res.result) {
      const created = res.result as any
      followupResults.value.push({ id: created.id, title: created.title })
    }
    if (choice === 'rerun') {
      // The rerun may have changed task state — refresh the whole view.
      await load()
    }
  } catch (e: any) {
    error.value = e.message
  } finally {
    resolvingAction.value = false
  }
}
async function reject() { async function reject() {
if (!task.value || !rejectReason.value) return if (!task.value || !rejectReason.value) return
try { try {
@ -242,9 +268,31 @@ const hasSteps = computed(() => (task.value?.pipeline_steps?.length ?? 0) > 0)
</div> </div>
<!-- Approve Modal --> <!-- Approve Modal -->
<Modal v-if="showApprove" title="Approve Task" @close="showApprove = false; followupResults = []"> <Modal v-if="showApprove" title="Approve Task" @close="showApprove = false; followupResults = []; pendingActions = []">
<!-- Pending permission actions -->
<div v-if="pendingActions.length" class="space-y-3">
<p class="text-sm text-yellow-400">Permission issues need your decision:</p>
<div v-for="(action, i) in pendingActions" :key="i"
class="border border-yellow-900/50 rounded-lg p-3 space-y-2">
<p class="text-sm text-gray-300">{{ action.description }}</p>
<div class="flex gap-2">
<button @click="resolveAction(action, 'rerun')" :disabled="resolvingAction"
class="px-3 py-1 text-xs bg-blue-900/50 text-blue-400 border border-blue-800 rounded hover:bg-blue-900 disabled:opacity-50">
Rerun (skip permissions)
</button>
<button @click="resolveAction(action, 'manual_task')" :disabled="resolvingAction"
class="px-3 py-1 text-xs bg-gray-800 text-gray-300 border border-gray-700 rounded hover:bg-gray-700 disabled:opacity-50">
Create task
</button>
<button @click="resolveAction(action, 'skip')" :disabled="resolvingAction"
class="px-3 py-1 text-xs bg-gray-800 text-gray-500 border border-gray-700 rounded hover:bg-gray-700 disabled:opacity-50">
Skip
</button>
</div>
</div>
</div>
<!-- Follow-up results --> <!-- Follow-up results -->
<div v-if="followupResults.length" class="space-y-3"> <div v-if="followupResults.length && !pendingActions.length" class="space-y-3">
<p class="text-sm text-green-400">Task approved. Created {{ followupResults.length }} follow-up tasks:</p> <p class="text-sm text-green-400">Task approved. Created {{ followupResults.length }} follow-up tasks:</p>
<div class="space-y-1"> <div class="space-y-1">
<router-link v-for="f in followupResults" :key="f.id" :to="`/task/${f.id}`" <router-link v-for="f in followupResults" :key="f.id" :to="`/task/${f.id}`"
@ -258,7 +306,7 @@ const hasSteps = computed(() => (task.value?.pipeline_steps?.length ?? 0) > 0)
</button> </button>
</div> </div>
<!-- Approve form --> <!-- Approve form -->
<form v-else @submit.prevent="approve" class="space-y-3"> <form v-if="!followupResults.length && !pendingActions.length" @submit.prevent="approve" class="space-y-3">
<label class="flex items-center gap-2 text-sm text-gray-300 cursor-pointer"> <label class="flex items-center gap-2 text-sm text-gray-300 cursor-pointer">
<input type="checkbox" v-model="approveForm.createFollowups" <input type="checkbox" v-model="approveForm.createFollowups"
class="rounded border-gray-600 bg-gray-800 text-blue-500" /> class="rounded border-gray-600 bg-gray-800 text-blue-500" />