"""Regression tests for KIN-111 — two separate bug batches.

Batch A (empty/null pipeline):
Root cause: PM returns {"pipeline": null} or {"pipeline": []} — crashes or hangs.

Batch B (Deploy button, Settings JSON, Worktrees toggle):
Root cause: Vite dev server intercepts /api/* requests and returns HTML instead of
proxying to FastAPI. Fix: add server.proxy in vite.config.ts.

Batch A coverage:
(1) run_pipeline with steps=[] returns {success: False, error: 'empty_pipeline'}
(2) run_pipeline with steps=[] does NOT transition task to in_progress
(3) run_pipeline with steps=[] does NOT create a pipeline record in DB
(4) CLI run: PM returns {"pipeline": null} → exit(1) with error, not TypeError crash
(5) CLI run: PM returns {"pipeline": []} → exit(1) with error, run_pipeline not called
(6) run_pipeline with steps=[] — task status stays unchanged (not mutated to any other status)
(7) generate_followups: agent returns "[]" → {created: [], pending_actions: []}
(8) generate_followups: agent returns "[]" → no tasks created in DB
(9) generate_followups: task has no prior agent_logs → Claude still called (no early bail)
(10) API /followup: agent returns "[]" → needs_decision is False

Batch B coverage:
(11) GET /api/projects returns Content-Type: application/json (not text/html)
(12) PATCH /api/projects/{id} with worktrees_enabled=True → 200, not 400 Bad Request
(13) POST /api/projects/{id}/deploy without deploy config → 400 (button blocked correctly)
(14) vite.config.ts has server.proxy for /api → proxy to FastAPI (the actual fix)

KIN-P1-001 revision (empty array edge cases — deeper investigation):
(15) _row_to_dict: text field title='[]' must NOT be decoded to list (latent bug)
(16) _row_to_dict: JSON field brief='[]' must be decoded to [] list (correct behavior)
(17) Task round-trip with brief=[] — stored and retrieved as empty list
(18) Task round-trip with title='[]' — title must stay string (latent bug, needs whitelist fix)
(19) Task brief=None — stays None after round-trip
(20) Task brief=['developer'] (single-element) — round-trips correctly"""
|
2026-03-17 21:25:12 +02:00
|
|
|
|
|
|
|
|
import json
|
2026-03-18 14:06:23 +02:00
|
|
|
import sqlite3
|
2026-03-17 21:25:12 +02:00
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
from click.testing import CliRunner
|
|
|
|
|
from unittest.mock import patch, MagicMock
|
|
|
|
|
|
|
|
|
|
from core.db import init_db
|
|
|
|
|
from core import models
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# Shared fixture
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def conn():
    """In-memory DB seeded with one project ('proj') and one task ('PROJ-001')."""
    connection = init_db(":memory:")
    models.create_project(connection, "proj", "Proj", "/tmp/proj", tech_stack=["python"])
    models.create_task(connection, "PROJ-001", "proj", "Fix bug", brief={"route_type": "debug"})
    yield connection
    connection.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# (1/2/3) run_pipeline with steps=[] — early return, no DB side effects
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
class TestRunPipelineEmptySteps:
    """(1/2/3) run_pipeline(steps=[]) must fail fast with no DB side effects."""

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_returns_error_dict(self, mock_auth, conn):
        """run_pipeline with steps=[] must return {success: False, error: 'empty_pipeline'}."""
        from agents.runner import run_pipeline

        result = run_pipeline(conn, "PROJ-001", [])
        assert result["success"] is False
        assert result.get("error") == "empty_pipeline", (
            f"Expected error='empty_pipeline', got: {result}"
        )

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_does_not_set_task_in_progress(self, mock_auth, conn):
        """run_pipeline with steps=[] must NOT transition task to in_progress."""
        from agents.runner import run_pipeline

        run_pipeline(conn, "PROJ-001", [])
        task = models.get_task(conn, "PROJ-001")
        assert task["status"] != "in_progress", (
            "Task must not be set to in_progress when pipeline has no steps"
        )

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_does_not_create_pipeline_record(self, mock_auth, conn):
        """run_pipeline with steps=[] must NOT create any pipeline record in DB."""
        from agents.runner import run_pipeline

        run_pipeline(conn, "PROJ-001", [])
        # No pipeline record must exist for this task
        row = conn.execute(
            "SELECT COUNT(*) FROM pipelines WHERE task_id = 'PROJ-001'"
        ).fetchone()
        assert row[0] == 0, (
            f"Expected 0 pipeline records, found {row[0]}. "
            "run_pipeline must not persist to DB when steps=[]."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# (4/5) CLI run_task: PM returns null or empty pipeline
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
def _seed_db(tmp_path):
    """Create a real on-disk DB with test data and return its path string."""
    db_file = tmp_path / "test.db"
    seeded = init_db(str(db_file))
    models.create_project(seeded, "proj", "Proj", str(tmp_path), tech_stack=["python"])
    models.create_task(seeded, "PROJ-001", "proj", "Fix bug", brief={"route_type": "debug"})
    seeded.close()
    return str(db_file)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestCliRunTaskNullPipeline:
    """PM returns {"pipeline": null} — CLI must exit(1), not crash with TypeError."""

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_exit_code_is_1_not_exception(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=null → CLI exits with code 1 (not unhandled exception)."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        # PM agent "succeeds" but hands back a null pipeline in its JSON output.
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": None, "analysis": "nothing"}),
        }
        runner = CliRunner()
        result = runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        assert result.exit_code == 1, (
            f"Expected exit_code=1 for null pipeline, got {result.exit_code}"
        )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_no_typeerror_on_null_pipeline(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=null must not crash with TypeError (len(None))."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": None, "analysis": "nothing"}),
        }
        runner = CliRunner()
        result = runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        # If a TypeError was raised, result.exception will contain it
        if result.exception is not None:
            assert not isinstance(result.exception, TypeError), (
                "CLI crashed with TypeError when PM returned pipeline=null. "
                "Missing validation in cli/main.py."
            )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_run_pipeline_not_called_on_null(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """run_pipeline must NOT be called when PM returns pipeline=null."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": None, "analysis": "nothing"}),
        }
        runner = CliRunner()
        runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        mock_run_pipeline.assert_not_called()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestCliRunTaskEmptyPipeline:
    """PM returns {"pipeline": []} — CLI must exit(1), not create empty pipeline."""

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_exit_code_is_1(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=[] → CLI exits with code 1."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        # PM agent "succeeds" but proposes zero steps.
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": [], "analysis": "nothing to do"}),
        }
        runner = CliRunner()
        result = runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        assert result.exit_code == 1, (
            f"Expected exit_code=1 for empty pipeline, got {result.exit_code}"
        )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_run_pipeline_not_called_on_empty(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """run_pipeline must NOT be called when PM returns pipeline=[]."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": [], "analysis": "nothing to do"}),
        }
        runner = CliRunner()
        runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        mock_run_pipeline.assert_not_called()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# (6) run_pipeline with steps=[] — task status stays at original value
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
class TestRunPipelineEmptyStepsStatusUnchanged:
    """(6) run_pipeline(steps=[]) must leave the task's status untouched."""

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_task_status_stays_todo(self, mock_auth, conn):
        """run_pipeline(steps=[]) must leave task.status unchanged (stays 'todo')."""
        from agents.runner import run_pipeline

        def current_status():
            return models.get_task(conn, "PROJ-001")["status"]

        before = current_status()
        run_pipeline(conn, "PROJ-001", [])
        after = current_status()
        assert after == before, (
            f"Task status changed from '{before}' to '{after}' after empty pipeline. "
            "run_pipeline must not mutate task status when steps=[]."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# (7/8/9) generate_followups: agent returns "[]"
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsEmptyArray:
    """Edge cases when the followup agent returns an empty JSON array '[]'."""

    @patch("agents.runner._run_claude")
    def test_agent_returns_empty_array_gives_empty_result(self, mock_claude, conn):
        """generate_followups: agent returning '[]' → {created: [], pending_actions: []}."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        result = generate_followups(conn, "PROJ-001")
        assert result["created"] == [], (
            f"Expected created=[], got: {result['created']}"
        )
        assert result["pending_actions"] == [], (
            f"Expected pending_actions=[], got: {result['pending_actions']}"
        )

    @patch("agents.runner._run_claude")
    def test_agent_returns_empty_array_creates_no_tasks_in_db(self, mock_claude, conn):
        """generate_followups: agent returning '[]' must not create any task in DB."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        tasks_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        generate_followups(conn, "PROJ-001")
        tasks_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert tasks_after == tasks_before, (
            f"Expected no new tasks, but count went from {tasks_before} to {tasks_after}. "
            "generate_followups must not create tasks when agent returns []."
        )

    @patch("agents.runner._run_claude")
    def test_no_pipeline_history_still_calls_claude(self, mock_claude, conn):
        """generate_followups: task with no agent_logs must still invoke Claude (no early bail)."""
        from core.followup import generate_followups

        # Verify there are no agent_logs for this task
        log_count = conn.execute(
            "SELECT COUNT(*) FROM agent_logs WHERE task_id = 'PROJ-001'"
        ).fetchone()[0]
        assert log_count == 0, "Precondition: no agent logs must exist"
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        generate_followups(conn, "PROJ-001")
        # BUG FIX: the original read `mock_claude.assert_called_once(), ("…")`,
        # which evaluated the call and then built a throwaway tuple — the
        # parenthesized "message" was dead text never attached to any assertion.
        # assert_called_once() raises AssertionError by itself when Claude was
        # not called exactly once; Claude must be invoked even with no prior
        # pipeline history (the early-return `if not pipeline_output` must be
        # removed from core/followup.py).
        mock_claude.assert_called_once()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# (10) API /followup: needs_decision=False when agent returns []
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
class TestApiFollowupEmptyArrayNeedsDecision:
    """POST /api/tasks/{id}/followup: needs_decision must be False when agent returns []."""

    @patch("agents.runner._run_claude")
    def test_needs_decision_false_when_empty_array(self, mock_claude, tmp_path):
        """API: agent returning '[]' → needs_decision is False in response."""
        import web.api as api_module

        db_path = tmp_path / "test.db"
        # Point the app at an isolated DB before the app object is used.
        api_module.DB_PATH = db_path
        from web.api import app
        from fastapi.testclient import TestClient

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        c = TestClient(app)
        # Seed a project and a task (first task gets id P1-001).
        c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"})
        c.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"})

        r = c.post("/api/tasks/P1-001/followup", json={})
        assert r.status_code == 200
        data = r.json()
        assert data["needs_decision"] is False, (
            f"Expected needs_decision=False when agent returns [], got: {data['needs_decision']}"
        )
        assert data["created"] == []
        assert data["pending_actions"] == []
|
2026-03-17 21:30:57 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# Additional edge cases — deeper investigation
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
class TestRunPipelineNoneSteps:
    """run_pipeline with steps=None — must also return empty_pipeline, not crash."""

    @patch("agents.runner.check_claude_auth")
    def test_none_steps_returns_empty_pipeline_error(self, mock_auth, conn):
        """run_pipeline(steps=None) must return {success: False, error: 'empty_pipeline'}."""
        from agents.runner import run_pipeline

        result = run_pipeline(conn, "PROJ-001", None)
        # Both the failure flag and the machine-readable error code must be set.
        assert result["success"] is False
        assert result.get("error") == "empty_pipeline", (
            f"Expected error='empty_pipeline' for None steps, got: {result}"
        )

    @patch("agents.runner.check_claude_auth")
    def test_none_steps_does_not_mutate_task(self, mock_auth, conn):
        """run_pipeline(steps=None) must not change task status."""
        from agents.runner import run_pipeline

        def current_status():
            return models.get_task(conn, "PROJ-001")["status"]

        before = current_status()
        run_pipeline(conn, "PROJ-001", None)
        after = current_status()
        assert after == before, (
            f"Task status changed from '{before}' to '{after}' after None steps"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestRunPipelineEmptyStepsDryRun:
    """run_pipeline(steps=[], dry_run=True) — must bail before auth check."""

    def test_empty_steps_dry_run_returns_error_without_auth(self, conn):
        """run_pipeline(steps=[], dry_run=True) must return early without auth check."""
        from agents.runner import run_pipeline

        # Deliberately NOT patching check_claude_auth: if auth runs, it may
        # raise — the empty-steps guard has to fire before any auth check.
        result = run_pipeline(conn, "PROJ-001", [], dry_run=True)
        assert result["success"] is False
        assert result.get("error") == "empty_pipeline"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestCliRunTaskNonListPipeline:
    """PM returns pipeline as a non-list non-null value (dict or string) — CLI must exit(1)."""

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_dict_pipeline_exits_1(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline={} (dict) → CLI exits 1."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        # Pipeline is a dict, not a list — CLI must reject it.
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": {"steps": []}, "analysis": "..."}),
        }
        runner = CliRunner()
        result = runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        assert result.exit_code == 1, (
            f"Expected exit_code=1 for dict pipeline, got {result.exit_code}"
        )
        mock_run_pipeline.assert_not_called()

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_string_pipeline_exits_1(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline='[]' (JSON-string-encoded) → CLI exits 1."""
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        # Pipeline is the *string* "[]", not an actual list.
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": "[]", "analysis": "..."}),
        }
        runner = CliRunner()
        result = runner.invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])
        assert result.exit_code == 1, (
            f"Expected exit_code=1 for string pipeline, got {result.exit_code}"
        )
        mock_run_pipeline.assert_not_called()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsNullAndDict:
    """Additional generate_followups edge cases: null output, dict with empty tasks."""

    @patch("agents.runner._run_claude")
    def test_agent_returns_null_gives_empty_result(self, mock_claude, conn):
        """generate_followups: agent returning 'null' → {created: [], pending_actions: []}."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "null", "returncode": 0}
        result = generate_followups(conn, "PROJ-001")
        assert result["created"] == [], f"Expected created=[], got: {result['created']}"
        assert result["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_agent_returns_null_creates_no_tasks(self, mock_claude, conn):
        """generate_followups: agent returning 'null' must not create any tasks."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "null", "returncode": 0}
        tasks_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        generate_followups(conn, "PROJ-001")
        tasks_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert tasks_after == tasks_before

    @patch("agents.runner._run_claude")
    def test_agent_returns_dict_with_empty_tasks_list(self, mock_claude, conn):
        """generate_followups: agent returning {"tasks": []} → empty result, no tasks created."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": []}', "returncode": 0}
        tasks_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        result = generate_followups(conn, "PROJ-001")
        tasks_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert result["created"] == []
        assert tasks_after == tasks_before

    @patch("agents.runner._run_claude")
    def test_agent_returns_empty_string_gives_empty_result(self, mock_claude, conn):
        """generate_followups: agent returning '' (empty string) → {created: [], pending_actions: []}."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "", "returncode": 0}
        result = generate_followups(conn, "PROJ-001")
        assert result["created"] == [], (
            f"Expected created=[] for empty string output, got: {result['created']}"
        )
        assert result["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_agent_returns_whitespace_wrapped_empty_array(self, mock_claude, conn):
        """generate_followups: agent returning ' [] ' (whitespace-wrapped) → no tasks created."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": " [] ", "returncode": 0}
        result = generate_followups(conn, "PROJ-001")
        assert result["created"] == [], (
            f"Expected created=[] for whitespace-wrapped '[]', got: {result['created']}"
        )
        assert result["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_dict_tasks_empty_followups_nonempty_does_not_create_tasks(self, mock_claude, conn):
        """generate_followups: {"tasks": [], "followups": [{"title": "X"}]} → no tasks created.

        This tests the deeper edge case from KIN-P1-001 revision:
        when tasks=[] (empty list is falsy), the `or` logic in line 129 of followup.py
        incorrectly falls through to followups and creates tasks.

        BUG: `parsed.get('tasks') or parsed.get('followups') or []`
        — [] is falsy, so followups is used even though tasks was explicitly set.

        Fix: use explicit key-presence check instead of truthiness check.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [], "followups": [{"title": "Follow-up task", "type": "backend_dev"}]}',
            "returncode": 0,
        }
        tasks_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        result = generate_followups(conn, "PROJ-001")
        tasks_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert result["created"] == [], (
            f"Expected created=[] when tasks=[] (even with non-empty followups), got: {result['created']}. "
            "Bug in followup.py line 129: empty list is falsy, so `or` falls through to followups key."
        )
        assert tasks_after == tasks_before, (
            f"Task count changed from {tasks_before} to {tasks_after}. "
            "No tasks must be created when tasks key is explicitly empty."
        )
|
|
|
|
|
|
2026-03-17 22:09:29 +02:00
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# Batch B — (11/12/13/14) Deploy button, Settings JSON, Worktrees toggle
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def api_client(tmp_path):
    """TestClient with isolated DB, seeded with project 'p1'."""
    import web.api as api_module

    # Redirect the app's DB before any request touches it.
    api_module.DB_PATH = tmp_path / "test.db"

    from fastapi.testclient import TestClient

    from web.api import app

    client = TestClient(app)
    client.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"})
    return client
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestSettingsJsonResponse:
    """(11) GET /api/projects must return JSON content-type, not HTML."""

    def test_get_projects_content_type_is_json(self, api_client):
        """GET /api/projects → Content-Type must be application/json."""
        r = api_client.get("/api/projects")
        assert r.status_code == 200, f"Expected 200, got {r.status_code}"
        ct = r.headers.get("content-type", "")
        assert "application/json" in ct, (
            f"Expected content-type=application/json, got: {ct!r}. "
            "If Vite is serving HTML for /api/* requests, the proxy is not configured."
        )

    def test_get_projects_returns_list_not_html(self, api_client):
        """GET /api/projects → body must be a JSON list, not an HTML string."""
        r = api_client.get("/api/projects")
        assert r.status_code == 200
        data = r.json()  # raises JSONDecodeError if HTML was returned
        assert isinstance(data, list), f"Expected list, got: {type(data).__name__}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestWorktreesTogglePatch:
    """(12) PATCH /api/projects/{id} with worktrees_enabled=True must return 200."""

    def test_patch_worktrees_enabled_true_returns_200(self, api_client):
        """PATCH worktrees_enabled=True → 200, not 400 Bad Request."""
        r = api_client.patch("/api/projects/p1", json={"worktrees_enabled": True})
        assert r.status_code == 200, (
            f"Expected 200 for worktrees_enabled patch, got {r.status_code}: {r.text}"
        )

    def test_patch_worktrees_enabled_true_persists(self, api_client):
        """PATCH worktrees_enabled=True → project reflects the change."""
        api_client.patch("/api/projects/p1", json={"worktrees_enabled": True})
        r = api_client.get("/api/projects/p1")
        assert r.status_code == 200
        assert r.json()["worktrees_enabled"], (
            "worktrees_enabled must be truthy after PATCH"
        )

    def test_patch_worktrees_enabled_false_returns_200(self, api_client):
        """PATCH worktrees_enabled=False → 200, not 400 Bad Request."""
        r = api_client.patch("/api/projects/p1", json={"worktrees_enabled": False})
        assert r.status_code == 200, (
            f"Expected 200 for worktrees_enabled=False patch, got {r.status_code}: {r.text}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestDeployButtonBackend:
    """(13) Deploy endpoint must return 400 when no deploy config is set."""

    def test_deploy_without_config_returns_400(self, api_client):
        """POST /deploy on unconfigured project → 400 (Deploy button correctly blocked)."""
        r = api_client.post("/api/projects/p1/deploy")
        assert r.status_code == 400, (
            f"Expected 400 when neither deploy_runtime nor deploy_command is set, got {r.status_code}"
        )

    def test_deploy_not_found_returns_404(self, api_client):
        """POST /deploy on unknown project → 404."""
        r = api_client.post("/api/projects/NOPE/deploy")
        assert r.status_code == 404
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestViteProxyConfig:
    """(14) vite.config.ts must have server.proxy configured for /api.

    This is the actual fix for KIN-111: without the proxy, Vite serves its
    own HTML for /api/* requests in dev mode, causing JSON parse errors.
    """

    def test_vite_config_has_api_proxy(self):
        """vite.config.ts must define server.proxy that includes '/api'."""
        import pathlib

        config_path = pathlib.Path(__file__).parent.parent / "web" / "frontend" / "vite.config.ts"
        assert config_path.exists(), f"vite.config.ts not found at {config_path}"
        content = config_path.read_text()
        assert "proxy" in content, (
            "vite.config.ts has no 'proxy' config. "
            "Add server: { proxy: { '/api': 'http://localhost:8000' } } to fix "
            "the Unexpected token '<' error in Settings and the Bad Request on Worktrees toggle."
        )
        # BUG FIX: the original condition was
        #   "/api" in content or "'/api'" in content or '"/api"' in content
        # — both quoted variants contain the bare "/api" substring, so the first
        # test subsumed them and the rest of the `or` chain was dead code.
        assert "/api" in content, (
            "vite.config.ts proxy must include '/api' route to FastAPI backend."
        )
|
2026-03-18 14:06:23 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 revision — _row_to_dict latent bug: text fields decoded as JSON
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
def _make_sqlite_row(conn_in_memory, **kwargs):
|
|
|
|
|
"""Insert a row into a temp table and return it as sqlite3.Row."""
|
|
|
|
|
conn_in_memory.execute(
|
|
|
|
|
"CREATE TABLE IF NOT EXISTS _tmp_row_test (id TEXT, title TEXT, brief TEXT, description TEXT)"
|
|
|
|
|
)
|
|
|
|
|
conn_in_memory.execute(
|
|
|
|
|
"INSERT INTO _tmp_row_test VALUES (:id, :title, :brief, :description)",
|
|
|
|
|
{
|
|
|
|
|
"id": kwargs.get("id", "x"),
|
|
|
|
|
"title": kwargs.get("title", None),
|
|
|
|
|
"brief": kwargs.get("brief", None),
|
|
|
|
|
"description": kwargs.get("description", None),
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
row = conn_in_memory.execute(
|
|
|
|
|
"SELECT * FROM _tmp_row_test WHERE id = :id", {"id": kwargs.get("id", "x")}
|
|
|
|
|
).fetchone()
|
|
|
|
|
conn_in_memory.execute("DELETE FROM _tmp_row_test WHERE id = :id", {"id": kwargs.get("id", "x")})
|
|
|
|
|
return row
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def raw_conn():
    """Bare in-memory sqlite3 connection with sqlite3.Row rows (no kin schema)."""
    connection = sqlite3.connect(":memory:")
    connection.row_factory = sqlite3.Row
    yield connection
    connection.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestRowToDictJsonWhitelist:
    """(15/16) _row_to_dict JSON decoding whitelist behavior.

    Latent bug: _row_to_dict decodes ALL string fields starting with '[' or '{'
    through JSON, including text fields (title, description, name).
    Expected fix: only decode fields in a _JSON_COLUMNS whitelist.
    """

    def test_text_title_with_empty_array_stays_string(self, raw_conn):
        """_row_to_dict: title='[]' must stay '[]' (string), not be decoded to [].

        This test FAILS without the _JSON_COLUMNS whitelist fix in core/models.py.
        """
        from core.models import _row_to_dict

        row = _make_sqlite_row(raw_conn, title="[]", brief=None)
        result = _row_to_dict(row)
        assert isinstance(result["title"], str), (
            f"Expected title to be str '[]', got {type(result['title'])}: {result['title']!r}. "
            "Latent bug: _row_to_dict decodes text fields as JSON. "
            "Fix: introduce _JSON_COLUMNS whitelist in core/models.py."
        )
        assert result["title"] == "[]"

    def test_brief_empty_array_decoded_to_list(self, raw_conn):
        """_row_to_dict: brief='[]' must be decoded to [] list (it is a JSON column)."""
        from core.models import _row_to_dict

        row = _make_sqlite_row(raw_conn, title="Normal title", brief="[]")
        result = _row_to_dict(row)
        assert isinstance(result["brief"], list), (
            f"Expected brief to be list [], got {type(result['brief'])}: {result['brief']!r}"
        )
        assert result["brief"] == []

    def test_description_with_bracket_text_stays_string(self, raw_conn):
        """_row_to_dict: description='[deprecated]' must stay string (invalid JSON, not decoded)."""
        from core.models import _row_to_dict

        row = _make_sqlite_row(raw_conn, title="T", description="[deprecated]")
        result = _row_to_dict(row)
        assert isinstance(result["description"], str)
        assert result["description"] == "[deprecated]"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTaskModelEmptyArrayEdgeCases:
    """(17/18/19/20) End-to-end task creation/retrieval with empty array edge cases."""

    def test_task_with_empty_brief_list_round_trips(self, conn):
        """(17) create_task(brief=[]) → get_task returns brief as [] list."""
        models.create_task(conn, "PROJ-002", "proj", "Empty brief task", brief=[])
        fetched = models.get_task(conn, "PROJ-002")
        assert isinstance(fetched["brief"], list), (
            f"Expected brief to be list, got: {type(fetched['brief'])}"
        )
        assert fetched["brief"] == [], (
            f"Expected brief=[], got: {fetched['brief']!r}"
        )

    def test_task_with_json_looking_title_preserved(self, conn):
        """(18) create_task(title='[]') → get_task returns title as '[]' string, not list.

        This test FAILS without the _JSON_COLUMNS whitelist fix in core/models.py.
        Without the fix, _row_to_dict converts title='[]' (str) to [] (list).
        """
        models.create_task(conn, "PROJ-003", "proj", "[]")
        fetched = models.get_task(conn, "PROJ-003")
        assert isinstance(fetched["title"], str), (
            f"Expected task['title'] to be str '[]', got {type(fetched['title'])}: {fetched['title']!r}. "
            "Latent bug in _row_to_dict: text field decoded as JSON. "
            "Fix: introduce _JSON_COLUMNS whitelist in core/models.py."
        )
        assert fetched["title"] == "[]"

    def test_task_brief_none_stays_none(self, conn):
        """(19) create_task without brief → get_task returns brief=None."""
        models.create_task(conn, "PROJ-004", "proj", "No brief task")
        fetched = models.get_task(conn, "PROJ-004")
        assert fetched["brief"] is None, (
            f"Expected brief=None, got: {fetched['brief']!r}"
        )

    def test_task_with_single_element_brief_round_trips(self, conn):
        """(20) create_task(brief=['developer']) → get_task returns brief=['developer']."""
        models.create_task(conn, "PROJ-005", "proj", "Single step task", brief=["developer"])
        fetched = models.get_task(conn, "PROJ-005")
        assert isinstance(fetched["brief"], list), (
            f"Expected brief to be list, got: {type(fetched['brief'])}"
        )
        assert fetched["brief"] == ["developer"], (
            f"Expected brief=['developer'], got: {fetched['brief']!r}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper investigation — additional empty array edge cases
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTryParseJsonEmptyArrayCases:
    """Direct unit tests for _try_parse_json with empty array edge cases.

    _try_parse_json is used by followup.py and runner.py to parse all agent output.
    Its correct handling of empty arrays is critical for the empty-array bug fix.
    """

    @staticmethod
    def _parse(text):
        # Single import point for the unit under test.
        from agents.runner import _try_parse_json

        return _try_parse_json(text)

    def test_empty_array_string_returns_list(self):
        """_try_parse_json('[]') → [] (empty list)."""
        parsed = self._parse("[]")
        assert parsed == [], f"Expected [], got {parsed!r}"
        assert isinstance(parsed, list)

    def test_whitespace_wrapped_empty_array_returns_list(self):
        """_try_parse_json(' [] ') → [] (strips whitespace then parses)."""
        parsed = self._parse("  []  ")
        assert parsed == [], f"Expected [] for whitespace-wrapped '[]', got {parsed!r}"

    def test_empty_string_returns_none(self):
        """_try_parse_json('') → None (empty input = no result)."""
        parsed = self._parse("")
        assert parsed is None, f"Expected None for empty string, got {parsed!r}"

    def test_null_string_returns_none(self):
        """_try_parse_json('null') → None (JSON null decodes to Python None)."""
        parsed = self._parse("null")
        assert parsed is None, f"Expected None for 'null', got {parsed!r}"

    def test_empty_dict_string_returns_dict(self):
        """_try_parse_json('{}') → {} (empty dict)."""
        parsed = self._parse("{}")
        assert parsed == {}, f"Expected {{}}, got {parsed!r}"
        assert isinstance(parsed, dict)

    def test_empty_array_in_markdown_fence_returns_list(self):
        """_try_parse_json with ```json\\n[]\\n``` code fence → []."""
        parsed = self._parse("```json\n[]\n```")
        assert parsed == [], f"Expected [] from markdown fence, got {parsed!r}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestRowToDictEmptyDictAndTextFields:
    """Deeper _row_to_dict tests: empty dict JSON column, text field with dict-like content."""

    def test_brief_empty_dict_decoded(self, raw_conn):
        """_row_to_dict: brief='{}' → decoded to {} (empty dict, JSON column)."""
        from core.models import _row_to_dict

        decoded = _row_to_dict(_make_sqlite_row(raw_conn, brief="{}"))
        assert isinstance(decoded["brief"], dict), (
            f"Expected brief to be dict {{}}, got {type(decoded['brief'])}: {decoded['brief']!r}"
        )
        assert decoded["brief"] == {}

    def test_title_with_dict_content_stays_string(self, raw_conn):
        """_row_to_dict: title='{\"key\": \"val\"}' must stay string (not in _JSON_COLUMNS)."""
        from core.models import _row_to_dict

        decoded = _row_to_dict(_make_sqlite_row(raw_conn, title='{"key": "val"}'))
        assert isinstance(decoded["title"], str), (
            f"Expected title to remain str, got {type(decoded['title'])}: {decoded['title']!r}. "
            "title is not in _JSON_COLUMNS and must never be decoded."
        )
        assert decoded["title"] == '{"key": "val"}'

    def test_description_with_dict_content_stays_string(self, raw_conn):
        """_row_to_dict: description='{\"a\": 1}' must stay string (not in _JSON_COLUMNS)."""
        from core.models import _row_to_dict

        decoded = _row_to_dict(_make_sqlite_row(raw_conn, description='{"a": 1}'))
        assert isinstance(decoded["description"], str), (
            f"Expected description to remain str, got {type(decoded['description'])}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTaskBriefEmptyDictRoundTrip:
    """create_task/get_task round-trip with empty dict brief."""

    def test_task_with_empty_dict_brief_round_trips(self, conn):
        """create_task(brief={}) → get_task returns brief as {} dict."""
        models.create_task(conn, "PROJ-006", "proj", "Empty dict brief", brief={})
        fetched = models.get_task(conn, "PROJ-006")
        assert isinstance(fetched["brief"], dict), (
            f"Expected brief to be dict, got: {type(fetched['brief'])}"
        )
        assert fetched["brief"] == {}, f"Expected brief={{}}, got: {fetched['brief']!r}"

    def test_project_tech_stack_empty_list_round_trips(self, conn):
        """create_project(tech_stack=[]) → get_project returns tech_stack as [] list."""
        models.create_project(conn, "proj2", "Proj2", "/tmp/proj2", tech_stack=[])
        fetched = models.get_project(conn, "proj2")
        assert isinstance(fetched["tech_stack"], list), (
            f"Expected tech_stack to be list, got: {type(fetched['tech_stack'])}"
        )
        assert fetched["tech_stack"] == [], (
            f"Expected tech_stack=[], got: {fetched['tech_stack']!r}"
        )

    def test_project_name_with_brackets_stays_string(self, conn):
        """create_project(name='[Proj]') → get_project returns name as string, not list."""
        models.create_project(conn, "proj3", "[Proj]", "/tmp/proj3")
        fetched = models.get_project(conn, "proj3")
        assert isinstance(fetched["name"], str), (
            f"Expected name to be str '[Proj]', got {type(fetched['name'])}: {fetched['name']!r}. "
            "name is a text field and must never be JSON-decoded."
        )
        assert fetched["name"] == "[Proj]"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsEmptyFollowupsKey:
    """generate_followups deeper edge cases: empty followups key, items without title."""

    @patch("agents.runner._run_claude")
    def test_dict_with_empty_followups_key_creates_no_tasks(self, mock_claude, conn):
        """generate_followups: {"followups": []} → no tasks, empty result."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"followups": []}', "returncode": 0}
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert res["created"] == [], (
            f"Expected created=[], got: {res['created']}"
        )
        assert count_after == count_before, (
            f"Task count changed from {count_before} to {count_after}"
        )

    @patch("agents.runner._run_claude")
    def test_list_with_items_missing_title_creates_no_tasks(self, mock_claude, conn):
        """generate_followups: [{"type": "backend_dev"}] (no title) → items skipped, no tasks."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"type": "backend_dev", "brief": "Do something"}]',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert res["created"] == [], (
            f"Expected created=[] for items missing 'title', got: {res['created']}. "
            "Items without 'title' key must be skipped."
        )
        assert count_after == count_before

    @patch("agents.runner._run_claude")
    def test_list_with_mixed_valid_and_titleless_items(self, mock_claude, conn):
        """generate_followups: one valid item + one without title → only valid one creates task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": "Valid task", "type": "backend_dev"}, {"type": "frontend_dev"}]',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert len(res["created"]) == 1, (
            f"Expected 1 task created (item with title), got: {len(res['created'])}"
        )
        assert count_after == count_before + 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper investigation — null-valued dict keys
|
|
|
|
|
#
|
|
|
|
|
# Bug: followup.py extracts `parsed["tasks"]` or `parsed["followups"]` from dict,
|
|
|
|
|
# but does NOT check if the extracted value is None/non-iterable.
|
|
|
|
|
# `for item in None:` → TypeError: 'NoneType' object is not iterable
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsNullValuedDictKeys:
    """generate_followups: dict with null tasks/followups values — must not crash.

    Bug: when agent returns {"tasks": null} or {"followups": null}, the code does:
        parsed = parsed["tasks"]   # parsed = None
        for item in parsed:        # TypeError: 'NoneType' is not iterable

    Fix: guard against None after extraction, e.g. `parsed = parsed["tasks"] or []`
    """

    @patch("agents.runner._run_claude")
    def test_dict_with_null_tasks_key_returns_empty_no_crash(self, mock_claude, conn):
        """generate_followups: {"tasks": null} → {created: [], pending_actions: []}, no TypeError."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": null}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], (
            f"Expected created=[], got: {res['created']}. "
            "Bug: parsed['tasks']=None causes TypeError in for-loop."
        )
        assert res["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_dict_with_null_followups_key_returns_empty_no_crash(self, mock_claude, conn):
        """generate_followups: {"followups": null} → empty result, no TypeError."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"followups": null}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], (
            f"Expected created=[], got: {res['created']}. "
            "Bug: parsed['followups']=None causes TypeError in for-loop."
        )
        assert res["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_dict_with_null_tasks_and_nonempty_followups_no_crash(self, mock_claude, conn):
        """generate_followups: {"tasks": null, "followups": [...]} → takes tasks path, returns empty, no crash.

        tasks key is present (even as null), so followups fallback must NOT be reached.
        But extracted None must be handled gracefully — no tasks created, no TypeError.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": null, "followups": [{"title": "Should be ignored"}]}',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert res["created"] == [], (
            "tasks key is present (even as null) — tasks path taken, followups must be ignored. "
            "No tasks must be created."
        )
        assert count_after == count_before

    @patch("agents.runner._run_claude")
    def test_dict_null_tasks_creates_no_tasks_in_db(self, mock_claude, conn):
        """generate_followups: {"tasks": null} → no tasks created in DB."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": null}', "returncode": 0}
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert count_after == count_before, (
            f"Task count changed from {count_before} to {count_after}. "
            "No tasks must be created when tasks=null."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — additional empty array edge cases
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsTasksKeyPriority:
    """tasks key present and non-empty: followups key must be completely ignored."""

    @patch("agents.runner._run_claude")
    def test_nonempty_tasks_ignores_followups_key(self, mock_claude, conn):
        """{"tasks": [{"title": "T1"}], "followups": [{"title": "F1"}]} → only T1 created.

        Verifies that when tasks key is present AND non-empty, the followups key
        is not consulted at all. Result must contain exactly 1 task (T1), not 2.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [{"title": "T1", "type": "backend_dev"}], "followups": [{"title": "F1"}]}',
            "returncode": 0,
        }
        res = generate_followups(conn, "PROJ-001")
        assert len(res["created"]) == 1, (
            f"Expected 1 task (from tasks key), got {len(res['created'])}. "
            "followups key must be ignored when tasks key is present and non-empty."
        )
        assert res["created"][0]["title"] == "T1"

    @patch("agents.runner._run_claude")
    def test_nonempty_tasks_does_not_create_followups_tasks(self, mock_claude, conn):
        """Verify only the task from tasks[] is created in DB, not the one from followups[]."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [{"title": "Real task"}], "followups": [{"title": "Should not exist"}]}',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert count_after == count_before + 1, (
            f"Expected exactly 1 new task, got {count_after - count_before}. "
            "followups items must not be created when tasks key present."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsFalsyNonListTasksValue:
    """generate_followups: tasks key maps to falsy non-null, non-list values — must not crash."""

    @patch("agents.runner._run_claude")
    def test_tasks_is_integer_zero_returns_empty(self, mock_claude, conn):
        """{"tasks": 0} → empty result, no crash. Guard `if not isinstance(parsed, list)` fires."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": 0}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], f"Expected created=[], got: {res['created']}"
        assert res["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_tasks_is_false_returns_empty(self, mock_claude, conn):
        """{"tasks": false} → empty result, no crash."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": false}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], f"Expected created=[], got: {res['created']}"
        assert res["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_tasks_is_empty_string_returns_empty(self, mock_claude, conn):
        """{"tasks": ""} → empty result, no crash."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": ""}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], f"Expected created=[], got: {res['created']}"
        assert res["pending_actions"] == []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsMixedListItems:
    """generate_followups: list with non-dict and non-title elements mixed with valid items."""

    @patch("agents.runner._run_claude")
    def test_list_with_null_and_valid_item_creates_only_valid(self, mock_claude, conn):
        """[null, {"title": "Valid task"}] → only the valid dict item creates a task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[null, {"title": "Valid task", "type": "backend_dev"}]',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert len(res["created"]) == 1, (
            f"Expected 1 task from valid dict item, got {len(res['created'])}"
        )
        assert count_after == count_before + 1

    @patch("agents.runner._run_claude")
    def test_list_with_integers_strings_and_valid_item(self, mock_claude, conn):
        """[42, "string", {"title": "T1"}] → only the valid dict item creates a task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[42, "string", {"title": "T1"}]',
            "returncode": 0,
        }
        res = generate_followups(conn, "PROJ-001")
        assert len(res["created"]) == 1, (
            f"Expected 1 task (only dict with title), got {len(res['created'])}"
        )
        assert res["created"][0]["title"] == "T1"

    @patch("agents.runner._run_claude")
    def test_list_with_all_non_dict_items_creates_no_tasks(self, mock_claude, conn):
        """[null, 1, "string", true] → no tasks created (no dict items with title)."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[null, 1, "string", true]',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert res["created"] == [], (
            f"Expected no tasks created, got: {res['created']}"
        )
        assert count_after == count_before
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTaskSpecEmptyArrayRoundTrip:
    """spec field (JSON column) round-trip with empty array."""

    def test_task_spec_empty_list_round_trips(self, conn):
        """create_task(spec=[]) → get_task returns spec as [] list."""
        models.create_task(conn, "PROJ-007", "proj", "Spec test", spec=[])
        fetched = models.get_task(conn, "PROJ-007")
        assert isinstance(fetched["spec"], list), (
            f"Expected spec to be list, got: {type(fetched['spec'])}"
        )
        assert fetched["spec"] == [], f"Expected spec=[], got: {fetched['spec']!r}"

    def test_task_spec_nonempty_list_round_trips(self, conn):
        """create_task(spec=["step1"]) → get_task returns spec=['step1'] list."""
        models.create_task(conn, "PROJ-008", "proj", "Spec test 2", spec=["step1"])
        fetched = models.get_task(conn, "PROJ-008")
        assert isinstance(fetched["spec"], list)
        assert fetched["spec"] == ["step1"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 regression — followups key positive path (no tasks key)
|
|
|
|
|
#
|
|
|
|
|
# Bug: the old `or` chain `parsed.get('tasks') or parsed.get('followups') or []`
|
|
|
|
|
# made tasks=[] fall through to followups, but the followups path itself must
|
|
|
|
|
# still work when only the followups key is present.
|
|
|
|
|
# Fix: explicit key-presence check with `in` — elif branch handles this case.
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsFollowupsPathPositive:
    """Followups fallback path works after the `or`→`in` fix.

    When agent returns {"followups": [...]} without a tasks key, items must be
    processed via the elif branch. This confirms the fix did not break the
    followups fallback.
    """

    @patch("agents.runner._run_claude")
    def test_followups_key_only_with_valid_item_creates_task(self, mock_claude, conn):
        """{"followups": [{"title": "X"}]} (no tasks key) → task created via followups path."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"followups": [{"title": "Followup task", "type": "backend_dev"}]}',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert len(res["created"]) == 1, (
            f"Expected 1 task from followups key, got {len(res['created'])}. "
            "The elif 'followups' branch must be reachable when tasks key is absent."
        )
        assert res["created"][0]["title"] == "Followup task"
        assert count_after == count_before + 1

    @patch("agents.runner._run_claude")
    def test_dict_with_unknown_key_only_creates_no_tasks(self, mock_claude, conn):
        """{"steps": [{"title": "X"}]} (neither tasks nor followups key) → no tasks, else branch."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"steps": [{"title": "Should be ignored"}]}',
            "returncode": 0,
        }
        count_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        res = generate_followups(conn, "PROJ-001")
        count_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert res["created"] == [], (
            f"Expected no tasks when dict has neither tasks nor followups key, got: {res['created']}"
        )
        assert count_after == count_before

    @patch("agents.runner._run_claude")
    def test_tasks_is_truthy_nonlist_string_no_crash(self, mock_claude, conn):
        """{"tasks": "pending_review"} (truthy non-list string) → empty result, no crash.

        Regression for the `if not isinstance(parsed, list): parsed = []` guard.
        A truthy string like "pending_review" passes `or` truthiness but is not iterable
        as a task list. The isinstance guard must convert it to [].
        """
        from core.followup import generate_followups

        mock_claude.return_value = {"output": '{"tasks": "pending_review"}', "returncode": 0}
        res = generate_followups(conn, "PROJ-001")
        assert res["created"] == [], (
            f"Expected created=[] for tasks='pending_review', got: {res['created']}. "
            "Non-list tasks value must be replaced with [] by isinstance guard."
        )
        assert res["pending_actions"] == []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — update_task with empty list fields
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestUpdateTaskEmptyListFields:
    """update_task correctly handles empty list fields after the _JSON_COLUMNS whitelist fix.

    Previous tests only covered create_task round-trips. This class tests that
    update_task also correctly encodes/decodes empty lists for JSON columns.
    """

    def test_update_task_brief_to_empty_list_round_trips(self, conn):
        """update_task(brief=[]) on existing task → get_task returns brief as [] list."""
        models.update_task(conn, "PROJ-001", brief=[])
        fetched = models.get_task(conn, "PROJ-001")
        assert isinstance(fetched["brief"], list), (
            f"Expected brief=[] after update, got {type(fetched['brief'])}: {fetched['brief']!r}"
        )
        assert fetched["brief"] == []

    def test_update_task_labels_to_empty_list_round_trips(self, conn):
        """update_task(labels=[]) on existing task → get_task returns labels as [] list."""
        models.update_task(conn, "PROJ-001", labels=[])
        fetched = models.get_task(conn, "PROJ-001")
        assert isinstance(fetched["labels"], list), (
            f"Expected labels=[] after update, got {type(fetched['labels'])}: {fetched['labels']!r}"
        )
        assert fetched["labels"] == []

    def test_update_task_brief_from_dict_to_empty_list(self, conn):
        """update_task: overwriting dict brief with [] → retrieved as []."""
        before = models.get_task(conn, "PROJ-001")
        assert isinstance(before["brief"], dict), "Precondition: brief must be a dict"
        models.update_task(conn, "PROJ-001", brief=[])
        after = models.get_task(conn, "PROJ-001")
        assert after["brief"] == [], (
            f"Expected brief=[] after overwrite from dict, got: {after['brief']!r}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — _row_to_dict: None input and "null" string
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestRowToDictNoneAndNullString:
    """_row_to_dict(None) → None; brief='null' stays string (not decoded via JSON)."""

    def test_row_to_dict_none_returns_none(self):
        """_row_to_dict(None) must return None without raising."""
        from core.models import _row_to_dict

        result = _row_to_dict(None)
        assert result is None, f"Expected None for None input, got: {result!r}"

    def test_brief_null_string_stays_string(self, raw_conn):
        """_row_to_dict: brief='null' stays as string — 'null' doesn't start with '[' or '{'.

        Contrast with _try_parse_json('null') → None.
        _row_to_dict relies on startswith('[', '{') guard, so 'null' is NOT decoded.
        """
        from core.models import _row_to_dict

        row = _make_sqlite_row(raw_conn, brief="null")
        result = _row_to_dict(row)
        # FIX: the second literal below is a plain (non-f) string, so the
        # original "'{{'" rendered as two literal braces in the failure
        # message; a single brace needs no escaping here.
        assert isinstance(result["brief"], str), (
            f"Expected brief to remain str 'null', got {type(result['brief'])}: {result['brief']!r}. "
            "'null' does not start with '[' or '{', so _row_to_dict must leave it as a string."
        )
        assert result["brief"] == "null"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — decisions.tags empty list round-trip
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestDecisionsTagsEmptyList:
    """add_decision with tags=[] — tags stored as JSON and retrieved as empty list."""

    def test_add_decision_tags_empty_list_round_trips(self, conn):
        """add_decision(tags=[]) → retrieved decision has tags=[] (list, not None)."""
        decision = models.add_decision(
            conn, "proj", "gotcha", "Empty tags decision", "Some description", tags=[]
        )

        assert isinstance(decision["tags"], list), (
            f"Expected tags=[] after add_decision, got {type(decision['tags'])}: {decision['tags']!r}"
        )
        assert decision["tags"] == []

    def test_add_decision_tags_none_stays_none(self, conn):
        """add_decision(tags=None) → decision.tags is None (not encoded as JSON)."""
        decision = models.add_decision(
            conn, "proj", "decision", "No tags decision", "description", tags=None
        )

        assert decision["tags"] is None, f"Expected tags=None for tags=None input, got: {decision['tags']!r}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — _try_parse_json: whitespace-only and embedded []
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTryParseJsonWhitespaceAndEmbedded:
    """_try_parse_json edge cases: whitespace-only input; [] embedded in prose text."""

    def test_newline_only_returns_none(self):
        """_try_parse_json('\\n') → None (strips to empty string)."""
        from agents.runner import _try_parse_json

        parsed = _try_parse_json("\n")

        assert parsed is None, f"Expected None for newline-only input, got: {parsed!r}"

    def test_whitespace_and_newlines_returns_none(self):
        """_try_parse_json('\\t \\n ') → None (strips to empty string)."""
        from agents.runner import _try_parse_json

        parsed = _try_parse_json("\t \n ")

        assert parsed is None, f"Expected None for whitespace-only input, got: {parsed!r}"

    def test_empty_array_embedded_in_prose_extracted(self):
        """_try_parse_json with '[]' embedded in prose → extracts and returns [].

        The bracket-scanning fallback in _try_parse_json finds the first '[...]'
        block even when surrounded by text — the same code path that extracts
        JSON from agent outputs containing prose before the JSON.
        """
        from agents.runner import _try_parse_json

        parsed = _try_parse_json("The result is: [] as expected")

        assert parsed == [], (
            f"Expected [] extracted from prose, got: {parsed!r}. "
            "The bracket-scanning fallback must find the first '[...]' block."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — null-title item guard
|
|
|
|
|
#
|
|
|
|
|
# Bug: followup.py filters items with `"title" not in item`, but does NOT check
|
|
|
|
|
# if item["title"] is None (JSON null). When agent returns {"title": null}, the
|
|
|
|
|
# key IS present → filter passes → create_task(title=None) → IntegrityError
|
|
|
|
|
# (title is NOT NULL in DB schema).
|
|
|
|
|
#
|
|
|
|
|
# Deeper edge: {"title": ""} (empty string) — key present, string is falsy
|
|
|
|
|
# but valid. Currently creates a task with empty title in DB.
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsNullAndEmptyTitle:
    """Items with title=null or title='' — should be skipped or handled gracefully.

    title TEXT NOT NULL in DB schema, so title=None would cause an IntegrityError.
    Items with null title must be skipped (same as items with no title key).
    """

    @staticmethod
    def _task_count(conn):
        # Current number of rows in the tasks table.
        return conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]

    @patch("agents.runner._run_claude")
    def test_item_with_null_title_does_not_crash(self, mock_claude, conn):
        """[{"title": null}] — must NOT raise IntegrityError or any exception.

        DB schema: title TEXT NOT NULL. Inserting None crashes unless guarded.
        Filter `"title" not in item` does NOT catch this — key exists, value is null.
        Fix required: also check `if not item.get("title"):` or `if not item["title"]:`.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": null, "type": "backend_dev"}]',
            "returncode": 0,
        }

        # Must not raise — either skips item or handles gracefully.
        outcome = generate_followups(conn, "PROJ-001")

        assert outcome["created"] == [], (
            f"Expected created=[] for item with null title, got: {outcome['created']}. "
            "Items with null title must be skipped (title NOT NULL in DB schema)."
        )

    @patch("agents.runner._run_claude")
    def test_item_with_null_title_creates_no_tasks_in_db(self, mock_claude, conn):
        """[{"title": null}] — no tasks must be created in DB."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": null, "type": "backend_dev"}]',
            "returncode": 0,
        }
        before = self._task_count(conn)

        generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert after == before, (
            f"Task count changed from {before} to {after}. "
            "Items with null title must be skipped."
        )

    @patch("agents.runner._run_claude")
    def test_mixed_null_title_and_valid_item(self, mock_claude, conn):
        """[{"title": null}, {"title": "Valid"}] — null skipped, valid item creates task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": null}, {"title": "Valid task", "type": "backend_dev"}]',
            "returncode": 0,
        }
        before = self._task_count(conn)

        outcome = generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        # Null-title item must be skipped; valid item must create exactly 1 task.
        assert len(outcome["created"]) == 1, (
            f"Expected 1 task (valid item only), got {len(outcome['created'])}. "
            "Null-title item must be skipped without affecting subsequent items."
        )
        assert after == before + 1

    @patch("agents.runner._run_claude")
    def test_item_with_empty_string_title_does_not_crash(self, mock_claude, conn):
        """[{"title": ""}] — empty string title must be skipped, not create a task.

        Fix: `not item.get("title")` guards against both title=null and title="".
        An empty string would silently create a broken task.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": "", "type": "backend_dev"}]',
            "returncode": 0,
        }

        outcome = generate_followups(conn, "PROJ-001")

        assert outcome["created"] == [], (
            f"Expected created=[] for item with empty string title, got: {outcome['created']}. "
            "Items with title='' must be skipped (not item.get('title') guard)."
        )

    @patch("agents.runner._run_claude")
    def test_item_with_empty_string_title_creates_no_tasks_in_db(self, mock_claude, conn):
        """[{"title": ""}] — no tasks must be created in DB."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": "", "type": "backend_dev"}]',
            "returncode": 0,
        }
        before = self._task_count(conn)

        generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert after == before, (
            f"Task count changed from {before} to {after}. "
            "Items with empty string title must be skipped."
        )

    @patch("agents.runner._run_claude")
    def test_mixed_empty_string_title_and_valid_item(self, mock_claude, conn):
        """[{"title": ""}, {"title": "Valid"}] — empty-string skipped, valid creates task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[{"title": ""}, {"title": "Valid task", "type": "backend_dev"}]',
            "returncode": 0,
        }
        before = self._task_count(conn)

        outcome = generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert len(outcome["created"]) == 1, (
            f"Expected 1 task (valid item only), got {len(outcome['created'])}. "
            "Empty-string-title item must be skipped without blocking subsequent items."
        )
        assert after == before + 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTryParseJsonInternalWhitespace:
    """_try_parse_json with arrays containing internal whitespace."""

    def test_array_with_internal_spaces_returns_empty_list(self):
        """_try_parse_json('[ ]') — array with space inside → [] (valid JSON)."""
        from agents.runner import _try_parse_json

        parsed = _try_parse_json("[ ]")

        assert parsed == [], f"Expected [] for '[ ]', got: {parsed!r}"
        assert isinstance(parsed, list)

    def test_array_with_newline_inside_returns_empty_list(self):
        """_try_parse_json('[\\n]') — array with newline inside → []."""
        from agents.runner import _try_parse_json

        parsed = _try_parse_json("[\n]")

        assert parsed == [], f"Expected [] for '[\\n]', got: {parsed!r}"

    def test_nested_empty_array_extracted_from_tasks(self):
        """_try_parse_json('{"tasks": [[]]}') → dict with tasks=[[]].

        tasks is a list containing one empty list. Not a valid task item
        (non-dict), so generate_followups will skip it. This only verifies
        that _try_parse_json itself handles the shape.
        """
        from agents.runner import _try_parse_json

        parsed = _try_parse_json('{"tasks": [[]]}')

        assert isinstance(parsed, dict), f"Expected dict, got {type(parsed)}: {parsed!r}"
        assert parsed == {"tasks": [[]]}, f"Got: {parsed!r}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsNestedEmptyList:
    """generate_followups: items list contains nested empty lists — must skip them."""

    @patch("agents.runner._run_claude")
    def test_nested_empty_list_skipped_valid_item_creates_task(self, mock_claude, conn):
        """[[], {"title": "T1"}] — inner [] is non-dict, skipped; T1 creates task."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '[[], {"title": "T1", "type": "backend_dev"}]',
            "returncode": 0,
        }
        before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]

        outcome = generate_followups(conn, "PROJ-001")

        after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert len(outcome["created"]) == 1, (
            f"Expected 1 task (T1 only), inner [] must be skipped, got: {len(outcome['created'])}"
        )
        assert after == before + 1

    @patch("agents.runner._run_claude")
    def test_tasks_nested_empty_list_via_tasks_key(self, mock_claude, conn):
        """{"tasks": [[]]} — tasks contains a list, not a dict — all items skipped."""
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [[]]}',
            "returncode": 0,
        }
        before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]

        outcome = generate_followups(conn, "PROJ-001")

        after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert outcome["created"] == [], (
            f"Expected no tasks from nested [[]], got: {outcome['created']}"
        )
        assert after == before
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — _JSON_COLUMNS whitelist completeness
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestJsonColumnsWhitelist:
    """Regression guard: _JSON_COLUMNS whitelist must include/exclude correct columns.

    Locks down the whitelist so accidental additions or removals are caught
    immediately — both the inclusion of JSON fields and the exclusion of
    plain-text fields are verified.
    """

    def test_json_columns_includes_required_fields(self):
        """_JSON_COLUMNS must contain all columns that store JSON arrays/dicts."""
        from core.models import _JSON_COLUMNS

        required = {
            "tech_stack", "brief", "spec", "review", "test_result",
            "security_result", "labels", "tags", "dependencies",
            "steps", "artifacts", "decisions_made", "blockers",
            "extra_json", "pending_actions",
        }

        missing = required - _JSON_COLUMNS
        assert not missing, (
            f"_JSON_COLUMNS is missing expected JSON columns: {missing}. "
            "These columns store JSON and must be decoded by _row_to_dict."
        )

    def test_json_columns_excludes_text_fields(self):
        """_JSON_COLUMNS must NOT contain text fields that should never be JSON-decoded."""
        from core.models import _JSON_COLUMNS

        text_fields = {
            "title", "description", "name", "path", "status",
            "acceptance_criteria", "assigned_role", "id", "project_id",
        }

        overlap = text_fields & _JSON_COLUMNS
        assert not overlap, (
            f"Text fields found in _JSON_COLUMNS: {overlap}. "
            "These fields are plain text and must never be JSON-decoded."
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestAcceptanceCriteriaTextFieldNotDecoded:
    """acceptance_criteria is a text field — must never be JSON-decoded by _row_to_dict."""

    def test_acceptance_criteria_with_brackets_stays_string(self, conn):
        """create_task(acceptance_criteria='[]') → field stays as '[]' string, not list."""
        models.create_task(
            conn, "PROJ-009", "proj", "AC test",
            acceptance_criteria="[]",
        )

        stored = models.get_task(conn, "PROJ-009")

        assert isinstance(stored["acceptance_criteria"], str), (
            f"Expected acceptance_criteria to be str '[]', got "
            f"{type(stored['acceptance_criteria'])}: {stored['acceptance_criteria']!r}. "
            "acceptance_criteria is not in _JSON_COLUMNS and must stay as string."
        )
        assert stored["acceptance_criteria"] == "[]"

    def test_acceptance_criteria_with_json_dict_stays_string(self, conn):
        """create_task(acceptance_criteria='{\"k\":1}') → stays as JSON-encoded string."""
        models.create_task(
            conn, "PROJ-010", "proj", "AC dict test",
            acceptance_criteria='{"must": "pass"}',
        )

        stored = models.get_task(conn, "PROJ-010")

        assert isinstance(stored["acceptance_criteria"], str), (
            f"Expected acceptance_criteria to remain str, got "
            f"{type(stored['acceptance_criteria'])}: {stored['acceptance_criteria']!r}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsWithEmptyBriefTask:
    """generate_followups called on a task that has brief=[] (empty list).

    The context-building step must handle brief=[] without crashing — the
    empty list has to serialize cleanly into the prompt context, and Claude
    must still be invoked (no early bail).
    """

    @patch("agents.runner._run_claude")
    def test_task_with_empty_brief_list_does_not_crash(self, mock_claude, conn):
        """generate_followups on task with brief=[] must not raise any exception."""
        from core.followup import generate_followups

        models.create_task(conn, "PROJ-011", "proj", "Empty brief followup test", brief=[])
        mock_claude.return_value = {"output": "[]", "returncode": 0}

        outcome = generate_followups(conn, "PROJ-011")

        assert outcome["created"] == [], (
            f"Expected created=[], got: {outcome['created']}"
        )
        assert outcome["pending_actions"] == []
        mock_claude.assert_called_once()

    @patch("agents.runner._run_claude")
    def test_task_with_empty_brief_list_claude_called(self, mock_claude, conn):
        """generate_followups on task with brief=[] must still call Claude (no early bail)."""
        from core.followup import generate_followups

        models.create_task(conn, "PROJ-012", "proj", "Claude called test", brief=[])
        mock_claude.return_value = {"output": "[]", "returncode": 0}

        generate_followups(conn, "PROJ-012")

        mock_claude.assert_called_once()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 fix 3 — _detect_test_command: role='backend_dev' bypasses Makefile
|
|
|
|
|
#
|
|
|
|
|
# Bug: in mixed (frontend + backend) projects, make test runs both frontend
|
|
|
|
|
# (vitest) and backend (pytest) suites. A backend_dev agent change should only
|
|
|
|
|
# validate Python tests, not fail on unrelated vitest failures.
|
|
|
|
|
#
|
|
|
|
|
# Fix: when role='backend_dev' and a Python marker (pyproject.toml / setup.py)
|
|
|
|
|
# is present, _detect_test_command returns pytest directly — bypassing Makefile.
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestDetectTestCommandRoleBackendDev:
    """_detect_test_command with role='backend_dev' — pytest bypasses Makefile in mixed projects."""

    def test_backend_dev_with_pyproject_bypasses_makefile(self, tmp_path):
        """role='backend_dev' + pyproject.toml + Makefile → pytest returned, not make test.

        Without the fix, Makefile has priority and make test is returned —
        which runs frontend tests unrelated to backend changes.
        """
        import sys
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tpytest && vitest run\n")
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")

        detected = _detect_test_command(str(tmp_path), role="backend_dev")

        assert detected == f"{sys.executable} -m pytest", (
            f"Expected pytest for backend_dev+pyproject.toml, got: {detected!r}. "
            "backend_dev role must bypass Makefile and return pytest directly."
        )

    def test_backend_dev_with_setup_py_bypasses_makefile(self, tmp_path):
        """role='backend_dev' + setup.py + Makefile → pytest returned, not make test."""
        import sys
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tpytest && npm test\n")
        (tmp_path / "setup.py").write_text("from setuptools import setup\nsetup(name='x')\n")

        detected = _detect_test_command(str(tmp_path), role="backend_dev")

        assert detected == f"{sys.executable} -m pytest", (
            f"Expected pytest for backend_dev+setup.py, got: {detected!r}. "
            "setup.py is a Python project marker — backend_dev must bypass Makefile."
        )

    def test_backend_dev_without_python_marker_still_uses_makefile(self, tmp_path):
        """role='backend_dev' + only Makefile (no pyproject/setup.py) → make test returned.

        With no Python project marker, the backend_dev shortcut does NOT
        apply — normal priority order is used (Makefile first).
        """
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tmake check\n")

        detected = _detect_test_command(str(tmp_path), role="backend_dev")

        assert detected == "make test", (
            f"Expected 'make test' when no Python marker present, got: {detected!r}. "
            "Without pyproject.toml or setup.py, backend_dev must use normal priority."
        )

    def test_role_none_pyproject_and_makefile_uses_makefile(self, tmp_path):
        """role=None + pyproject.toml + Makefile → make test (normal priority order).

        The backend_dev bypass is role-specific; with role=None, Makefile
        keeps priority over pyproject.toml.
        """
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tpytest\n")
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")

        detected = _detect_test_command(str(tmp_path), role=None)

        assert detected == "make test", (
            f"Expected 'make test' for role=None, got: {detected!r}. "
            "Makefile must take priority over pyproject.toml when role is not backend_dev."
        )

    def test_role_frontend_dev_pyproject_and_makefile_uses_makefile(self, tmp_path):
        """role='frontend_dev' + pyproject.toml + Makefile → make test (bypass not triggered)."""
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tvitest run\n")
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")

        detected = _detect_test_command(str(tmp_path), role="frontend_dev")

        assert detected == "make test", (
            f"Expected 'make test' for frontend_dev role, got: {detected!r}. "
            "Only backend_dev role triggers the Python-marker bypass."
        )

    def test_backend_dev_pyproject_only_no_makefile_returns_pytest(self, tmp_path):
        """role='backend_dev' + only pyproject.toml (no Makefile) → pytest returned."""
        import sys
        from agents.runner import _detect_test_command

        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")

        detected = _detect_test_command(str(tmp_path), role="backend_dev")

        assert detected == f"{sys.executable} -m pytest", (
            f"Expected pytest for backend_dev+pyproject.toml (no Makefile), got: {detected!r}"
        )

    def test_backend_dev_no_files_returns_none(self, tmp_path):
        """role='backend_dev' + empty directory → None (no framework detected)."""
        from agents.runner import _detect_test_command

        detected = _detect_test_command(str(tmp_path), role="backend_dev")

        assert detected is None, (
            f"Expected None for backend_dev with no project files, got: {detected!r}"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — empty array edge cases: both keys empty/null
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestGenerateFollowupsBothKeysEmptyOrNull:
    """Empty array edge cases when BOTH tasks and followups keys are empty or null.

    Complements test_dict_tasks_empty_followups_nonempty_does_not_create_tasks
    by covering scenarios where the followups side is also empty/null.
    """

    @staticmethod
    def _task_count(conn):
        # Current number of rows in the tasks table.
        return conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]

    @patch("agents.runner._run_claude")
    def test_tasks_empty_and_followups_empty_creates_no_tasks(self, mock_claude, conn):
        """{"tasks": [], "followups": []} — both empty → no tasks created.

        tasks key is present (even as empty list) → followups key ignored
        entirely. Both branches yield empty results, so nothing is created.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [], "followups": []}',
            "returncode": 0,
        }
        before = self._task_count(conn)

        outcome = generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert outcome["created"] == [], (
            f"Expected created=[] when tasks=[] and followups=[], got: {outcome['created']}"
        )
        assert after == before

    @patch("agents.runner._run_claude")
    def test_tasks_empty_and_followups_null_creates_no_tasks(self, mock_claude, conn):
        """{"tasks": [], "followups": null} — tasks empty, followups null → no tasks, no crash.

        tasks key present and empty → followups key ignored. The null
        followups value is never accessed, so no crash should occur.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [], "followups": null}',
            "returncode": 0,
        }
        before = self._task_count(conn)

        outcome = generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert outcome["created"] == [], (
            f"Expected created=[] when tasks=[], followups=null, got: {outcome['created']}"
        )
        assert outcome["pending_actions"] == []
        assert after == before

    @patch("agents.runner._run_claude")
    def test_tasks_empty_followups_nonempty_list_ignores_all_followups(self, mock_claude, conn):
        """{"tasks": [], "followups": [X, Y, Z]} — tasks path taken, all 3 followups ignored.

        Three items sit under the followups key, but none may create tasks:
        the tasks key (even empty) takes priority and is iterated as an
        empty list.
        """
        from core.followup import generate_followups

        mock_claude.return_value = {
            "output": '{"tasks": [], "followups": [{"title": "F1"}, {"title": "F2"}, {"title": "F3"}]}',
            "returncode": 0,
        }
        before = self._task_count(conn)

        outcome = generate_followups(conn, "PROJ-001")

        after = self._task_count(conn)
        assert outcome["created"] == [], (
            f"Expected no tasks (3 followups all ignored due to empty tasks key), "
            f"got: {outcome['created']}. "
            "When tasks=[], the followups key must be completely ignored regardless of its contents."
        )
        assert after == before
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — create_task with labels=[] (JSON column)
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestCreateTaskLabelsEmptyList:
    """create_task with labels=[] — labels is a JSON column, must round-trip as []."""

    def test_create_task_labels_empty_list_round_trips(self, conn):
        """create_task(labels=[]) → get_task returns labels as [] list (not None).

        Existing tests cover update_task(labels=[]); this covers the create path.
        """
        models.create_task(conn, "PROJ-013", "proj", "Labels test", labels=[])

        stored = models.get_task(conn, "PROJ-013")

        assert isinstance(stored["labels"], list), (
            f"Expected labels to be list [], got {type(stored['labels'])}: {stored['labels']!r}. "
            "labels is in _JSON_COLUMNS and must be decoded from '[]' to []."
        )
        assert stored["labels"] == [], f"Expected labels=[], got: {stored['labels']!r}"

    def test_create_task_labels_nonempty_list_round_trips(self, conn):
        """create_task(labels=['bug', 'urgent']) → get_task returns correct list."""
        models.create_task(conn, "PROJ-014", "proj", "Labels nonempty test", labels=["bug", "urgent"])

        stored = models.get_task(conn, "PROJ-014")

        assert isinstance(stored["labels"], list)
        assert stored["labels"] == ["bug", "urgent"]

    def test_create_task_labels_none_stays_none(self, conn):
        """create_task without labels → get_task returns labels=None."""
        models.create_task(conn, "PROJ-015", "proj", "No labels task")

        stored = models.get_task(conn, "PROJ-015")

        assert stored["labels"] is None, f"Expected labels=None, got: {stored['labels']!r}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# KIN-P1-001 deeper revision — multiple JSON columns with empty arrays in one row
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestMultipleJsonColumnsEmptyArraySingleRow:
    """Stress test: task with multiple JSON columns all set to empty arrays.

    Verifies that _row_to_dict decodes all JSON columns of one row at once,
    with no cross-contamination or decoding errors between fields.
    """

    def test_task_with_multiple_empty_array_json_columns(self, conn):
        """create_task with brief=[], spec=[], labels=[] → all three fields round-trip as [].

        All three JSON columns carry empty arrays in the same row; each must
        decode independently and correctly.
        """
        models.create_task(
            conn, "PROJ-016", "proj", "Multi empty arrays",
            brief=[], spec=[], labels=[],
        )

        stored = models.get_task(conn, "PROJ-016")

        assert stored["brief"] == [], f"Expected brief=[], got: {stored['brief']!r}"
        assert stored["spec"] == [], f"Expected spec=[], got: {stored['spec']!r}"
        assert stored["labels"] == [], f"Expected labels=[], got: {stored['labels']!r}"
        for field in ("brief", "spec", "labels"):
            assert isinstance(stored[field], list)

    def test_task_title_stays_string_when_other_columns_are_empty_arrays(self, conn):
        """title stays string even when brief=[], spec=[], labels=[] in same row.

        Regression guard: the whitelist fix must not affect title decoding
        when many JSON columns in the same row hold empty arrays.
        """
        models.create_task(
            conn, "PROJ-017", "proj", "Normal title",
            brief=[], spec=[], labels=[],
        )

        stored = models.get_task(conn, "PROJ-017")

        assert isinstance(stored["title"], str), (
            f"Expected title to be str, got {type(stored['title'])}: {stored['title']!r}"
        )
        assert stored["title"] == "Normal title"
|