Add task detail view, pipeline visualization, approve/reject workflow
API (web/api.py) — 5 new endpoints:
GET /api/tasks/{id}/pipeline — agent_logs as pipeline steps
GET /api/tasks/{id}/full — task + steps + related decisions
POST /api/tasks/{id}/approve — mark done, optionally add decision (sketched below)
POST /api/tasks/{id}/reject — return to pending with reason
POST /api/tasks/{id}/run — launch pipeline in background (202)
Frontend:
TaskDetail (/task/:id) — full task page with:
- Pipeline graph: role cards with icons, arrows, status colors
- Click step → expand output (pre-formatted, JSON detected)
- Action bar: Approve (with optional decision), Reject, Run Pipeline
- Polling for live pipeline updates
Dashboard: review_tasks badge ("awaiting review" in yellow)
ProjectView: task rows are now clickable links to /task/:id
Runner: output_summary no longer truncated (full output for GUI).
Models: get_project_summary includes review_tasks count.
13 new API tests, 105 total, all passing. Frontend builds clean.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 14:32:29 +02:00
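
For orientation, here is a minimal sketch of what the approve handler could
look like. This is an illustration only: the in-memory TASKS/DECISIONS stores
stand in for the SQLite layer in core.models, and none of the names below are
confirmed against the actual web/api.py implementation.

# Hypothetical sketch of POST /api/tasks/{id}/approve — not the real handler.
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()

TASKS = {"P1-001": {"id": "P1-001", "project_id": "p1", "status": "review"}}
DECISIONS: list[dict] = []

class ApproveBody(BaseModel):
    decision_title: str | None = None
    decision_description: str | None = None
    decision_type: str = "decision"

@app.post("/api/tasks/{task_id}/approve")
def approve_task(task_id: str, body: ApproveBody):
    task = TASKS.get(task_id)
    if task is None:
        raise HTTPException(status_code=404, detail="task not found")
    task["status"] = "done"  # mark the task done
    decision = None
    if body.decision_title:
        # Optionally record a decision alongside the approval
        decision = {
            "title": body.decision_title,
            "description": body.decision_description,
            "type": body.decision_type,
        }
        DECISIONS.append(decision)
    return {"status": "done", "decision": decision}

The reject handler mirrors this shape (status back to "pending", reason stored
with the task), while /run returns 202 and kicks the pipeline off out of band.
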
"""Tests for web/api.py — new task endpoints (pipeline, approve, reject, full)."""

import pytest

from fastapi.testclient import TestClient

# Patch DB_PATH before importing app
import web.api as api_module


@pytest.fixture
def client(tmp_path):
    db_path = tmp_path / "test.db"
    api_module.DB_PATH = db_path
    from web.api import app

    c = TestClient(app)
    # Seed data
    c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"})
    c.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"})
    return c


def test_get_task(client):
    r = client.get("/api/tasks/P1-001")
    assert r.status_code == 200
    assert r.json()["title"] == "Fix bug"


def test_get_task_not_found(client):
    r = client.get("/api/tasks/NOPE")
    assert r.status_code == 404


def test_task_pipeline_empty(client):
    r = client.get("/api/tasks/P1-001/pipeline")
    assert r.status_code == 200
    assert r.json() == []


def test_task_pipeline_with_logs(client):
    # Insert agent logs directly
    from core.db import init_db
    from core import models

    conn = init_db(api_module.DB_PATH)
    models.log_agent_run(conn, "p1", "debugger", "execute",
                         task_id="P1-001", output_summary="Found bug",
                         tokens_used=1000, duration_seconds=5, success=True)
    models.log_agent_run(conn, "p1", "tester", "execute",
                         task_id="P1-001", output_summary="Tests pass",
                         tokens_used=500, duration_seconds=3, success=True)
    conn.close()

    r = client.get("/api/tasks/P1-001/pipeline")
    assert r.status_code == 200
    steps = r.json()
    assert len(steps) == 2
    assert steps[0]["agent_role"] == "debugger"
    assert steps[0]["output_summary"] == "Found bug"
    assert steps[1]["agent_role"] == "tester"
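

# For orientation, a plausible shape for the handler under test — a minimal
# sketch, assuming agent_logs rows map one-to-one onto pipeline steps (column
# names are inferred from log_agent_run above; the real handler lives in
# web/api.py):
#
#   @app.get("/api/tasks/{task_id}/pipeline")
#   def task_pipeline(task_id: str):
#       if get_task(task_id) is None:          # hypothetical helper
#           raise HTTPException(status_code=404)
#       rows = conn.execute(
#           "SELECT agent_role, output_summary, tokens_used, "
#           "duration_seconds, success FROM agent_logs "
#           "WHERE task_id = ? ORDER BY id",
#           (task_id,)).fetchall()
#       return [dict(row) for row in rows]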


def test_task_full(client):
    r = client.get("/api/tasks/P1-001/full")
    assert r.status_code == 200
    data = r.json()
    assert data["id"] == "P1-001"
    assert "pipeline_steps" in data
    assert "related_decisions" in data


def test_task_full_not_found(client):
    r = client.get("/api/tasks/NOPE/full")
    assert r.status_code == 404
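

# /full plausibly composes the task row with the two related queries — a
# sketch under assumed helper names (pipeline_steps and decisions_for_task
# are hypothetical):
#
#   data = dict(task)
#   data["pipeline_steps"] = pipeline_steps(task_id)
#   data["related_decisions"] = decisions_for_task(task_id)
#   return data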


def test_approve_task(client):
    # First set task to review
    from core.db import init_db
    from core import models

    conn = init_db(api_module.DB_PATH)
    models.update_task(conn, "P1-001", status="review")
    conn.close()

    r = client.post("/api/tasks/P1-001/approve", json={})
    assert r.status_code == 200
    assert r.json()["status"] == "done"

    # Verify task is done
    r = client.get("/api/tasks/P1-001")
    assert r.json()["status"] == "done"


def test_approve_with_decision(client):
    r = client.post("/api/tasks/P1-001/approve", json={
        "decision_title": "Use AbortController",
        "decision_description": "Fix race condition with AbortController",
        "decision_type": "decision",
    })
    assert r.status_code == 200
    assert r.json()["decision"] is not None
    assert r.json()["decision"]["title"] == "Use AbortController"


def test_approve_not_found(client):
    r = client.post("/api/tasks/NOPE/approve", json={})
    assert r.status_code == 404


def test_reject_task(client):
    from core.db import init_db
    from core import models

    conn = init_db(api_module.DB_PATH)
    models.update_task(conn, "P1-001", status="review")
    conn.close()

    r = client.post("/api/tasks/P1-001/reject", json={
        "reason": "Didn't fix the root cause"
    })
    assert r.status_code == 200
    assert r.json()["status"] == "pending"

    # Verify task is pending with review reason
    r = client.get("/api/tasks/P1-001")
    data = r.json()
    assert data["status"] == "pending"
    assert data["review"]["rejected"] == "Didn't fix the root cause"


def test_reject_not_found(client):
    r = client.post("/api/tasks/NOPE/reject", json={"reason": "bad"})
    assert r.status_code == 404


def test_task_pipeline_not_found(client):
    r = client.get("/api/tasks/NOPE/pipeline")
    assert r.status_code == 404


def test_running_endpoint_no_pipeline(client):
    r = client.get("/api/tasks/P1-001/running")
    assert r.status_code == 200
    assert r.json()["running"] is False


def test_running_endpoint_with_pipeline(client):
    from core.db import init_db
    from core import models

    conn = init_db(api_module.DB_PATH)
    models.create_pipeline(conn, "P1-001", "p1", "debug",
                           [{"role": "debugger"}])
    conn.close()

    r = client.get("/api/tasks/P1-001/running")
    assert r.status_code == 200
    assert r.json()["running"] is True


def test_running_endpoint_not_found(client):
    r = client.get("/api/tasks/NOPE/running")
    assert r.status_code == 404


def test_run_sets_in_progress(client):
    """POST /run should set task to in_progress immediately."""
    r = client.post("/api/tasks/P1-001/run")
    assert r.status_code == 202

    r = client.get("/api/tasks/P1-001")
    assert r.json()["status"] == "in_progress"


def test_run_not_found(client):
    r = client.post("/api/tasks/NOPE/run")
    assert r.status_code == 404


def test_project_summary_includes_review(client):
    from core.db import init_db
    from core import models

    conn = init_db(api_module.DB_PATH)
    models.update_task(conn, "P1-001", status="review")
    conn.close()

    r = client.get("/api/projects")
    projects = r.json()
    assert projects[0]["review_tasks"] == 1
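

# get_project_summary presumably gains a review-status aggregate alongside
# its existing counts — a sketch of the added query (assumed schema):
#
#   SELECT COUNT(*) FROM tasks WHERE project_id = ? AND status = 'review'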