kin: auto-commit after pipeline

This commit is contained in:
Gros Frumos 2026-03-17 21:25:12 +02:00
parent fe344c2a4f
commit d53c4096fd
9 changed files with 343 additions and 22 deletions

View file

@ -11,6 +11,7 @@ import re
import shlex
import shutil
import sqlite3
import sys
import subprocess
import time
from pathlib import Path
@ -810,7 +811,7 @@ def _detect_test_command(project_path: str) -> str | None:
# 3. pytest
if (path / "pyproject.toml").is_file() or (path / "setup.py").is_file():
return "pytest"
return f"{sys.executable} -m pytest"
# 4. npx tsc --noEmit
if (path / "tsconfig.json").is_file():
@ -1313,6 +1314,10 @@ def run_pipeline(
Returns {success, steps_completed, total_cost, total_tokens, total_duration, results}
"""
# Guard: empty pipeline — return immediately without touching DB or task state
if not steps:
return {"success": False, "error": "empty_pipeline"}
# Auth check — skip for dry_run (dry_run never calls claude CLI)
if not dry_run:
try:

View file

@ -650,6 +650,11 @@ def run_task(ctx, task_id, dry_run, allow_write):
raise SystemExit(1)
pipeline_steps = output["pipeline"]
if not isinstance(pipeline_steps, list) or not pipeline_steps:
click.echo(
f"PM returned empty or invalid pipeline: {pipeline_steps!r}", err=True
)
raise SystemExit(1)
analysis = output.get("analysis", "")
# Save completion_mode from PM output to task (only if neither task nor project has explicit mode)

View file

@ -89,8 +89,6 @@ def generate_followups(
return {"created": [], "pending_actions": []}
pipeline_output = _collect_pipeline_output(conn, task_id)
if not pipeline_output:
return {"created": [], "pending_actions": []}
# Build context for followup agent
language = project.get("language", "ru")

View file

@ -83,16 +83,18 @@ class TestDetectTestCommand:
assert _detect_test_command(str(tmp_path)) is None
def test_pyproject_toml_returns_pytest(self, tmp_path):
"""pyproject.toml → возвращает 'pytest'."""
"""pyproject.toml → возвращает sys.executable -m pytest (decision KIN-109)."""
import sys
from agents.runner import _detect_test_command
(tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")
assert _detect_test_command(str(tmp_path)) == "pytest"
assert _detect_test_command(str(tmp_path)) == f"{sys.executable} -m pytest"
def test_setup_py_returns_pytest(self, tmp_path):
"""setup.py → возвращает 'pytest'."""
"""setup.py → возвращает sys.executable -m pytest (decision KIN-109)."""
import sys
from agents.runner import _detect_test_command
(tmp_path / "setup.py").write_text("from setuptools import setup\nsetup(name='x')\n")
assert _detect_test_command(str(tmp_path)) == "pytest"
assert _detect_test_command(str(tmp_path)) == f"{sys.executable} -m pytest"
def test_tsconfig_returns_npx_tsc(self, tmp_path):
"""tsconfig.json → возвращает 'npx tsc --noEmit'."""

View file

@ -0,0 +1,287 @@
"""Regression tests for KIN-111 — empty/null pipeline returned by PM agent.
Root cause (two scenarios):
1. PM returns {"pipeline": null} cli/main.py calls len(None) TypeError (crash)
2. PM returns {"pipeline": []} run_pipeline creates empty DB record,
task stuck in 'review' with 0 steps executed
Fixes required:
cli/main.py: validate pipeline_steps after extraction non-empty list required
agents/runner.py: run_pipeline early-return when steps=[]
Coverage:
(1) run_pipeline with steps=[] returns {success: False, error: 'empty_pipeline'}
(2) run_pipeline with steps=[] does NOT transition task to in_progress
(3) run_pipeline with steps=[] does NOT create a pipeline record in DB
(4) CLI run: PM returns {"pipeline": null} exit(1) with error, not TypeError crash
(5) CLI run: PM returns {"pipeline": []} exit(1) with error, run_pipeline not called
(6) run_pipeline with steps=[] task status stays unchanged (not mutated to any other status)
(7) generate_followups: agent returns "[]" {created: [], pending_actions: []}
(8) generate_followups: agent returns "[]" no tasks created in DB
(9) generate_followups: task has no prior agent_logs Claude still called (no early bail)
(10) API /followup: agent returns "[]" needs_decision is False
"""
import json
import pytest
from click.testing import CliRunner
from unittest.mock import patch, MagicMock
from core.db import init_db
from core import models
# ---------------------------------------------------------------------------
# Shared fixture
# ---------------------------------------------------------------------------
@pytest.fixture
def conn():
    """In-memory DB seeded with one project ('proj') and one task ('PROJ-001')."""
    connection = init_db(":memory:")
    models.create_project(
        connection, "proj", "Proj", "/tmp/proj", tech_stack=["python"]
    )
    models.create_task(
        connection, "PROJ-001", "proj", "Fix bug", brief={"route_type": "debug"}
    )
    yield connection
    connection.close()
# ---------------------------------------------------------------------------
# (1/2/3) run_pipeline with steps=[] — early return, no DB side effects
# ---------------------------------------------------------------------------
class TestRunPipelineEmptySteps:
    """run_pipeline(steps=[]) must bail out early and leave the DB untouched."""

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_returns_error_dict(self, _auth, conn):
        """run_pipeline with steps=[] must return {success: False, error: 'empty_pipeline'}."""
        from agents.runner import run_pipeline

        outcome = run_pipeline(conn, "PROJ-001", [])
        assert outcome["success"] is False
        detail = f"Expected error='empty_pipeline', got: {outcome}"
        assert outcome.get("error") == "empty_pipeline", detail

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_does_not_set_task_in_progress(self, _auth, conn):
        """run_pipeline with steps=[] must NOT transition task to in_progress."""
        from agents.runner import run_pipeline

        run_pipeline(conn, "PROJ-001", [])
        status = models.get_task(conn, "PROJ-001")["status"]
        assert status != "in_progress", (
            "Task must not be set to in_progress when pipeline has no steps"
        )

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_does_not_create_pipeline_record(self, _auth, conn):
        """run_pipeline with steps=[] must NOT create any pipeline record in DB."""
        from agents.runner import run_pipeline

        run_pipeline(conn, "PROJ-001", [])
        # The pipelines table must have gained nothing for this task.
        count = conn.execute(
            "SELECT COUNT(*) FROM pipelines WHERE task_id = 'PROJ-001'"
        ).fetchone()[0]
        assert count == 0, (
            f"Expected 0 pipeline records, found {count}. "
            "run_pipeline must not persist to DB when steps=[]."
        )
# ---------------------------------------------------------------------------
# (4/5) CLI run_task: PM returns null or empty pipeline
# ---------------------------------------------------------------------------
def _seed_db(tmp_path):
    """Create a real on-disk DB with test data and return its path string."""
    location = tmp_path / "test.db"
    seeded = init_db(str(location))
    models.create_project(
        seeded, "proj", "Proj", str(tmp_path), tech_stack=["python"]
    )
    models.create_task(
        seeded, "PROJ-001", "proj", "Fix bug", brief={"route_type": "debug"}
    )
    seeded.close()
    return str(location)
class TestCliRunTaskNullPipeline:
    """PM returns {"pipeline": null} — CLI must exit(1), not crash with TypeError."""

    @staticmethod
    def _invoke(mock_run_agent, tmp_path):
        # Shared arrange/act: stub the PM to return a null pipeline, run the CLI once.
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": None, "analysis": "nothing"}),
        }
        return CliRunner().invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_exit_code_is_1_not_exception(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=null → CLI exits with code 1 (not unhandled exception)."""
        outcome = self._invoke(mock_run_agent, tmp_path)
        assert outcome.exit_code == 1, (
            f"Expected exit_code=1 for null pipeline, got {outcome.exit_code}"
        )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_no_typeerror_on_null_pipeline(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=null must not crash with TypeError (len(None))."""
        outcome = self._invoke(mock_run_agent, tmp_path)
        # A crash surfaces via result.exception; TypeError is the bug signature.
        if outcome.exception is not None:
            assert not isinstance(outcome.exception, TypeError), (
                "CLI crashed with TypeError when PM returned pipeline=null. "
                "Missing validation in cli/main.py."
            )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_run_pipeline_not_called_on_null(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """run_pipeline must NOT be called when PM returns pipeline=null."""
        self._invoke(mock_run_agent, tmp_path)
        mock_run_pipeline.assert_not_called()
class TestCliRunTaskEmptyPipeline:
    """PM returns {"pipeline": []} — CLI must exit(1), not create empty pipeline."""

    @staticmethod
    def _invoke(mock_run_agent, tmp_path):
        # Shared arrange/act: stub the PM to return an empty pipeline, run the CLI once.
        from cli.main import cli as kin_cli

        db_path = _seed_db(tmp_path)
        mock_run_agent.return_value = {
            "success": True,
            "output": json.dumps({"pipeline": [], "analysis": "nothing to do"}),
        }
        return CliRunner().invoke(kin_cli, ["--db", db_path, "run", "PROJ-001"])

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_exit_code_is_1(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """PM returning pipeline=[] → CLI exits with code 1."""
        outcome = self._invoke(mock_run_agent, tmp_path)
        assert outcome.exit_code == 1, (
            f"Expected exit_code=1 for empty pipeline, got {outcome.exit_code}"
        )

    @patch("agents.runner.run_pipeline")
    @patch("agents.runner.run_agent")
    def test_run_pipeline_not_called_on_empty(self, mock_run_agent, mock_run_pipeline, tmp_path):
        """run_pipeline must NOT be called when PM returns pipeline=[]."""
        self._invoke(mock_run_agent, tmp_path)
        mock_run_pipeline.assert_not_called()
# ---------------------------------------------------------------------------
# (6) run_pipeline with steps=[] — task status stays at original value
# ---------------------------------------------------------------------------
class TestRunPipelineEmptyStepsStatusUnchanged:
    """run_pipeline(steps=[]) must leave the task's status exactly as found."""

    @patch("agents.runner.check_claude_auth")
    def test_empty_steps_task_status_stays_todo(self, _auth, conn):
        """run_pipeline(steps=[]) must leave task.status unchanged (stays 'todo')."""
        from agents.runner import run_pipeline

        original = models.get_task(conn, "PROJ-001")["status"]
        run_pipeline(conn, "PROJ-001", [])
        current = models.get_task(conn, "PROJ-001")["status"]
        assert current == original, (
            f"Task status changed from '{original}' to '{current}' after empty pipeline. "
            "run_pipeline must not mutate task status when steps=[]."
        )
# ---------------------------------------------------------------------------
# (7/8/9) generate_followups: agent returns "[]"
# ---------------------------------------------------------------------------
class TestGenerateFollowupsEmptyArray:
    """Edge cases when the followup agent returns an empty JSON array '[]'."""

    @patch("agents.runner._run_claude")
    def test_agent_returns_empty_array_gives_empty_result(self, mock_claude, conn):
        """generate_followups: agent returning '[]' → {created: [], pending_actions: []}."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        result = generate_followups(conn, "PROJ-001")
        assert result["created"] == [], (
            f"Expected created=[], got: {result['created']}"
        )
        assert result["pending_actions"] == [], (
            f"Expected pending_actions=[], got: {result['pending_actions']}"
        )

    @patch("agents.runner._run_claude")
    def test_agent_returns_empty_array_creates_no_tasks_in_db(self, mock_claude, conn):
        """generate_followups: agent returning '[]' must not create any task in DB."""
        from core.followup import generate_followups

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        tasks_before = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        generate_followups(conn, "PROJ-001")
        tasks_after = conn.execute("SELECT COUNT(*) FROM tasks").fetchone()[0]
        assert tasks_after == tasks_before, (
            f"Expected no new tasks, but count went from {tasks_before} to {tasks_after}. "
            "generate_followups must not create tasks when agent returns []."
        )

    @patch("agents.runner._run_claude")
    def test_no_pipeline_history_still_calls_claude(self, mock_claude, conn):
        """generate_followups: task with no agent_logs must still invoke Claude (no early bail)."""
        from core.followup import generate_followups

        # Precondition: the freshly seeded task has no pipeline history at all.
        log_count = conn.execute(
            "SELECT COUNT(*) FROM agent_logs WHERE task_id = 'PROJ-001'"
        ).fetchone()[0]
        assert log_count == 0, "Precondition: no agent logs must exist"
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        generate_followups(conn, "PROJ-001")
        # BUG FIX: the original wrote `mock_claude.assert_called_once(), (<msg>)`,
        # which builds a throwaway tuple — the explanatory message was dead code.
        # Assert on call_count so the message is actually attached to the failure.
        assert mock_claude.call_count == 1, (
            "Claude must be called even when there is no prior pipeline history. "
            "The early-return 'if not pipeline_output' must be removed."
        )
# ---------------------------------------------------------------------------
# (10) API /followup: needs_decision=False when agent returns []
# ---------------------------------------------------------------------------
class TestApiFollowupEmptyArrayNeedsDecision:
    """POST /api/tasks/{id}/followup: needs_decision must be False when agent returns []."""

    @patch("agents.runner._run_claude")
    def test_needs_decision_false_when_empty_array(self, mock_claude, tmp_path):
        """API: agent returning '[]' → needs_decision is False in response."""
        import web.api as api_module

        # Point the API module at a throwaway on-disk DB before using the app.
        api_module.DB_PATH = tmp_path / "test.db"
        from fastapi.testclient import TestClient
        from web.api import app

        mock_claude.return_value = {"output": "[]", "returncode": 0}
        client = TestClient(app)
        client.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"})
        client.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"})
        response = client.post("/api/tasks/P1-001/followup", json={})
        assert response.status_code == 200
        payload = response.json()
        assert payload["needs_decision"] is False, (
            f"Expected needs_decision=False when agent returns [], got: {payload['needs_decision']}"
        )
        assert payload["created"] == []
        assert payload["pending_actions"] == []

View file

@ -897,6 +897,30 @@ def approve_task(task_id: str, body: TaskApprove | None = None):
}
class FollowupBody(BaseModel):
    # Request body for POST /api/tasks/{task_id}/followup.
    # dry_run is forwarded to generate_followups; presumably it previews
    # followups without persisting them — TODO confirm in core.followup.
    dry_run: bool = False
@app.post("/api/tasks/{task_id}/followup")
def followup_task(task_id: str, body: FollowupBody | None = None):
"""Generate follow-up tasks for a blocked or completed task."""
from core.followup import generate_followups
conn = get_conn()
t = models.get_task(conn, task_id)
if not t:
conn.close()
raise HTTPException(404, f"Task '{task_id}' not found")
dry_run = body.dry_run if body else False
result = generate_followups(conn, task_id, dry_run=dry_run)
conn.close()
return {
"created": result["created"],
"pending_actions": result["pending_actions"],
"needs_decision": len(result["pending_actions"]) > 0,
}
class ResolveAction(BaseModel):
    # Request body model; presumably used to resolve a pending followup
    # action — TODO confirm against the endpoint that consumes it.
    action: dict  # the pending action object, as returned by /followup
    choice: str  # "rerun" | "manual_task" | "skip"

View file

@ -40,12 +40,12 @@ const BASE_PROJECT = {
autocommit_enabled: null,
auto_test_enabled: null,
obsidian_vault_path: null,
deploy_command: null,
test_command: null,
deploy_host: null,
deploy_path: null,
deploy_runtime: null,
deploy_restart_cmd: null,
deploy_command: null as string | null,
test_command: null as string | null,
deploy_host: null as string | null,
deploy_path: null as string | null,
deploy_runtime: null as string | null,
deploy_restart_cmd: null as string | null,
created_at: '2024-01-01',
total_tasks: 0,
done_tasks: 0,

View file

@ -70,12 +70,12 @@ const BASE_PROJECT = {
autocommit_enabled: null,
auto_test_enabled: null,
obsidian_vault_path: null,
deploy_command: null,
test_command: null,
deploy_host: null,
deploy_path: null,
deploy_runtime: null,
deploy_restart_cmd: null,
deploy_command: null as string | null,
test_command: null as string | null,
deploy_host: null as string | null,
deploy_path: null as string | null,
deploy_runtime: null as string | null,
deploy_restart_cmd: null as string | null,
created_at: '2024-01-01',
total_tasks: 0,
done_tasks: 0,

View file

@ -51,7 +51,7 @@ describe('тогл видимости', () => {
const wrapper = mountConsole()
const panel = wrapper.find('[class*="bg-gray-950"]')
// v-show=false — элемент в DOM, но display: none
expect(panel.element.style.display).toBe('none')
expect((panel.element as HTMLElement).style.display).toBe('none')
wrapper.unmount()
})
@ -66,7 +66,7 @@ describe('тогл видимости', () => {
await wrapper.find('button').trigger('click')
await flushPromises()
const panel = wrapper.find('[class*="bg-gray-950"]')
expect(panel.element.style.display).not.toBe('none')
expect((panel.element as HTMLElement).style.display).not.toBe('none')
wrapper.unmount()
})
@ -85,7 +85,7 @@ describe('тогл видимости', () => {
await wrapper.find('button').trigger('click')
await flushPromises()
const panel = wrapper.find('[class*="bg-gray-950"]')
expect(panel.element.style.display).toBe('none')
expect((panel.element as HTMLElement).style.display).toBe('none')
wrapper.unmount()
})
})