# kin/tests/test_models.py
# NOTE(review): web-viewer chrome (line counts, "Raw Blame History", Unicode
# warnings) removed — it was paste residue, not part of the module.
"""Tests for core/models.py — all functions, in-memory SQLite."""
import pytest
from core.db import init_db
from core import models
@pytest.fixture
def conn():
    """Provide a fresh in-memory SQLite database per test; close it on teardown."""
    db = init_db(db_path=":memory:")
    yield db
    db.close()
# -- Projects --
def test_create_and_get_project(conn):
    """A created project carries defaults and can be fetched back by id."""
    created = models.create_project(
        conn,
        "vdol",
        "В долю поперёк",
        "~/projects/vdolipoperek",
        tech_stack=["vue3", "nuxt"],
    )
    assert created["id"] == "vdol"
    assert created["tech_stack"] == ["vue3", "nuxt"]
    # Status is not passed, so the model's default applies.
    assert created["status"] == "active"
    fetched = models.get_project(conn, "vdol")
    assert fetched["name"] == "В долю поперёк"


def test_get_project_not_found(conn):
    """get_project returns None for an unknown id."""
    assert models.get_project(conn, "nope") is None


def test_list_projects_filter(conn):
    """list_projects returns everything, or only rows matching a status filter."""
    for pid, name, path, status in (
        ("a", "A", "/a", "active"),
        ("b", "B", "/b", "paused"),
        ("c", "C", "/c", "active"),
    ):
        models.create_project(conn, pid, name, path, status=status)
    assert len(models.list_projects(conn)) == 3
    assert len(models.list_projects(conn, status="active")) == 2
    assert len(models.list_projects(conn, status="paused")) == 1


def test_update_project(conn):
    """update_project applies field changes and returns the updated row."""
    models.create_project(conn, "x", "X", "/x", priority=5)
    row = models.update_project(conn, "x", priority=1, status="maintenance")
    assert row["priority"] == 1
    assert row["status"] == "maintenance"


def test_update_project_tech_stack_json(conn):
    """tech_stack survives the JSON round-trip through update_project."""
    models.create_project(conn, "x", "X", "/x", tech_stack=["python"])
    row = models.update_project(conn, "x", tech_stack=["python", "fastapi"])
    assert row["tech_stack"] == ["python", "fastapi"]
# -- validate_completion_mode (KIN-063) --
def test_validate_completion_mode_valid_auto_complete():
    """'auto_complete' is accepted and returned unchanged."""
    assert models.validate_completion_mode("auto_complete") == "auto_complete"


def test_validate_completion_mode_valid_review():
    """'review' is accepted and returned unchanged."""
    assert models.validate_completion_mode("review") == "review"


def test_validate_completion_mode_invalid_fallback():
    """Any unrecognised value falls back to 'review'."""
    for bogus in ("auto", "", "unknown"):
        assert models.validate_completion_mode(bogus) == "review"
# -- get_effective_mode (KIN-063) --
def test_get_effective_mode_task_overrides_project(conn):
    """A task-level execution_mode takes precedence over the project's."""
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="review")
    models.create_task(conn, "P1-001", "p1", "Task", execution_mode="auto_complete")
    assert models.get_effective_mode(conn, "p1", "P1-001") == "auto_complete"


def test_get_effective_mode_falls_back_to_project(conn):
    """With no task-level mode, the project's execution_mode applies."""
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="auto_complete")
    models.create_task(conn, "P1-001", "p1", "Task")  # execution_mode=None
    assert models.get_effective_mode(conn, "p1", "P1-001") == "auto_complete"


def test_get_effective_mode_project_review_overrides_default(conn):
    """Project execution_mode='review' + task without override -> 'review'.

    Scenario: the PM wanted auto_complete, but the project is configured for
    human review; get_effective_mode must return the project-level 'review'.
    """
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="review")
    models.create_task(conn, "P1-001", "p1", "Task")  # no task-level override
    assert models.get_effective_mode(conn, "p1", "P1-001") == "review"
# -- Tasks --
def test_create_and_get_task(conn):
    """create_task stores the brief as JSON and defaults status to 'pending'."""
    models.create_project(conn, "p1", "P1", "/p1")
    task = models.create_task(
        conn, "P1-001", "p1", "Fix bug", brief={"summary": "broken login"}
    )
    assert task["id"] == "P1-001"
    assert task["brief"] == {"summary": "broken login"}
    assert task["status"] == "pending"


def test_list_tasks_filters(conn):
    """list_tasks filters by project_id and status, separately or combined."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_project(conn, "p2", "P2", "/p2")
    models.create_task(conn, "P1-001", "p1", "Task A", status="pending")
    models.create_task(conn, "P1-002", "p1", "Task B", status="done")
    models.create_task(conn, "P2-001", "p2", "Task C", status="pending")
    assert len(models.list_tasks(conn)) == 3
    assert len(models.list_tasks(conn, project_id="p1")) == 2
    assert len(models.list_tasks(conn, status="pending")) == 2
    assert len(models.list_tasks(conn, project_id="p1", status="done")) == 1


def test_update_task(conn):
    """update_task changes fields, stores a JSON spec, and stamps updated_at."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    row = models.update_task(
        conn, "P1-001", status="in_progress", spec={"steps": [1, 2, 3]}
    )
    assert row["status"] == "in_progress"
    assert row["spec"] == {"steps": [1, 2, 3]}
    assert row["updated_at"] is not None


def test_subtask(conn):
    """A task created with parent_task_id records its parent."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Parent")
    child = models.create_task(conn, "P1-001a", "p1", "Child", parent_task_id="P1-001")
    assert child["parent_task_id"] == "P1-001"
# -- Decisions --
def test_add_and_get_decisions(conn):
    """add_decision stores type and tags; get_decisions returns the row."""
    models.create_project(conn, "p1", "P1", "/p1")
    decision = models.add_decision(
        conn,
        "p1",
        "gotcha",
        "iOS Safari bottom sheet",
        "position:fixed breaks on iOS Safari",
        category="ui",
        tags=["ios-safari", "css"],
    )
    assert decision["type"] == "gotcha"
    assert decision["tags"] == ["ios-safari", "css"]
    assert len(models.get_decisions(conn, "p1")) == 1


def test_decisions_filter_by_category(conn):
    """get_decisions narrows results by category."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "Use WAL", "perf", category="architecture")
    models.add_decision(conn, "p1", "gotcha", "Safari bug", "css", category="ui")
    assert len(models.get_decisions(conn, "p1", category="ui")) == 1


def test_decisions_filter_by_tags(conn):
    """A decision matches when it carries any of the requested tags."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "Bug A", "desc", tags=["safari", "css"])
    models.add_decision(conn, "p1", "gotcha", "Bug B", "desc", tags=["chrome", "js"])
    models.add_decision(conn, "p1", "gotcha", "Bug C", "desc", tags=["safari", "js"])
    assert len(models.get_decisions(conn, "p1", tags=["safari"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["js"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["css"])) == 1


def test_decisions_filter_by_types(conn):
    """get_decisions can select several decision types at once."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "A", "a")
    models.add_decision(conn, "p1", "gotcha", "B", "b")
    models.add_decision(conn, "p1", "workaround", "C", "c")
    assert len(models.get_decisions(conn, "p1", types=["gotcha", "workaround"])) == 2


def test_decisions_limit(conn):
    """The limit parameter caps the number of rows returned."""
    models.create_project(conn, "p1", "P1", "/p1")
    for i in range(10):
        models.add_decision(conn, "p1", "decision", f"D{i}", f"desc{i}")
    assert len(models.get_decisions(conn, "p1", limit=3)) == 3
# -- Modules --
def test_add_and_get_modules(conn):
    """add_module stores the name and JSON dependencies; get_modules lists them."""
    models.create_project(conn, "p1", "P1", "/p1")
    module = models.add_module(
        conn,
        "p1",
        "search",
        "frontend",
        "src/search/",
        description="Search UI",
        dependencies=["auth"],
    )
    assert module["name"] == "search"
    assert module["dependencies"] == ["auth"]
    assert len(models.get_modules(conn, "p1")) == 1
# -- Agent Logs --
def test_log_agent_run(conn):
    """log_agent_run records the agent role, cost, and a truthy success flag."""
    models.create_project(conn, "p1", "P1", "/p1")
    entry = models.log_agent_run(
        conn,
        "p1",
        "developer",
        "implement",
        tokens_used=5000,
        model="sonnet",
        cost_usd=0.015,
        duration_seconds=45,
    )
    assert entry["agent_role"] == "developer"
    assert entry["cost_usd"] == 0.015
    assert entry["success"] == 1  # SQLite stores booleans as integers
# -- Pipelines --
def test_create_and_update_pipeline(conn):
    """A new pipeline starts 'running'; completing it stamps completed_at."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    steps = [{"step": "architect"}, {"step": "dev"}]
    pipe = models.create_pipeline(conn, "P1-001", "p1", "feature", steps)
    assert pipe["status"] == "running"
    assert pipe["steps"] == steps
    row = models.update_pipeline(
        conn, pipe["id"], status="completed", total_cost_usd=0.05, total_tokens=10000
    )
    assert row["status"] == "completed"
    assert row["completed_at"] is not None
# -- Support --
def test_create_and_list_tickets(conn):
    """create_ticket defaults status to 'new'; list_tickets filters correctly."""
    models.create_project(conn, "p1", "P1", "/p1")
    ticket = models.create_ticket(
        conn,
        "p1",
        "telegram_bot",
        "Не работает поиск",
        client_id="tg:12345",
        classification="bug",
    )
    assert ticket["source"] == "telegram_bot"
    assert ticket["status"] == "new"
    assert len(models.list_tickets(conn, project_id="p1")) == 1
    assert len(models.list_tickets(conn, status="resolved")) == 0
# -- Statistics --
def test_project_summary(conn):
    """get_project_summary aggregates task counts per project."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "A", status="done")
    models.create_task(conn, "P1-002", "p1", "B", status="in_progress")
    models.create_task(conn, "P1-003", "p1", "C", status="blocked")
    summary = models.get_project_summary(conn)
    assert len(summary) == 1
    row = summary[0]
    assert row["total_tasks"] == 3
    assert row["done_tasks"] == 1
    assert row["active_tasks"] == 1
    assert row["blocked_tasks"] == 1


def test_cost_summary(conn):
    """get_cost_summary totals cost, tokens, and run count over a window."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.log_agent_run(conn, "p1", "dev", "implement", cost_usd=0.10, tokens_used=5000)
    models.log_agent_run(conn, "p1", "reviewer", "review", cost_usd=0.05, tokens_used=2000)
    costs = models.get_cost_summary(conn, days=1)
    assert len(costs) == 1
    # Float totals compared with approx to avoid binary-rounding flakiness.
    assert costs[0]["total_cost_usd"] == pytest.approx(0.15)
    assert costs[0]["total_tokens"] == 7000
    assert costs[0]["runs"] == 2


def test_cost_summary_empty(conn):
    """With no agent runs logged, the cost summary is an empty list."""
    models.create_project(conn, "p1", "P1", "/p1")
    assert models.get_cost_summary(conn, days=7) == []
# -- add_decision_if_new --
def test_add_decision_if_new_adds_new_decision(conn):
    """A previously unseen title is inserted and returned."""
    models.create_project(conn, "p1", "P1", "/p1")
    added = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "description")
    assert added is not None
    assert added["title"] == "Use WAL mode"
    assert added["type"] == "gotcha"


def test_add_decision_if_new_skips_exact_duplicate(conn):
    """An identical (type, title) pair is rejected and not inserted twice."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "Use WAL mode", "desc1")
    assert models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "desc2") is None
    # The pre-existing decision must not be duplicated.
    assert len(models.get_decisions(conn, "p1")) == 1


def test_add_decision_if_new_skips_case_insensitive_duplicate(conn):
    """Duplicate detection ignores letter case in the title."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "Use UUID for task IDs", "desc")
    result = models.add_decision_if_new(conn, "p1", "decision", "use uuid for task ids", "other desc")
    assert result is None
    assert len(models.get_decisions(conn, "p1")) == 1


def test_add_decision_if_new_allows_same_title_different_type(conn):
    """The same title under a different type counts as a new decision."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "SQLite WAL", "desc")
    result = models.add_decision_if_new(conn, "p1", "convention", "SQLite WAL", "other desc")
    assert result is not None
    assert len(models.get_decisions(conn, "p1")) == 2


def test_add_decision_if_new_skips_whitespace_duplicate(conn):
    """Duplicate detection ignores leading/trailing whitespace in the title."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "convention", "Run tests after each change", "desc")
    result = models.add_decision_if_new(conn, "p1", "convention", " Run tests after each change ", "desc2")
    assert result is None
    assert len(models.get_decisions(conn, "p1")) == 1