# kin/tests/test_models.py
"""Tests for core/models.py — all functions, in-memory SQLite."""
import re
import pytest
from core.db import init_db
from core import models
from core.models import TASK_CATEGORIES
@pytest.fixture
def conn():
    """Yield a fresh in-memory SQLite database, closed after each test."""
    db = init_db(db_path=":memory:")
    yield db
    db.close()
# -- Projects --
def test_create_and_get_project(conn):
    """A created project round-trips through get_project with fields intact."""
    created = models.create_project(
        conn, "vdol", "В долю поперёк", "~/projects/vdolipoperek",
        tech_stack=["vue3", "nuxt"],
    )
    assert created["id"] == "vdol"
    assert created["status"] == "active"
    assert created["tech_stack"] == ["vue3", "nuxt"]
    fetched = models.get_project(conn, "vdol")
    assert fetched["name"] == "В долю поперёк"


def test_get_project_not_found(conn):
    """An unknown project id yields None rather than raising."""
    assert models.get_project(conn, "nope") is None


def test_list_projects_filter(conn):
    """list_projects honours the optional status filter."""
    for pid, title, status in (("a", "A", "active"),
                               ("b", "B", "paused"),
                               ("c", "C", "active")):
        models.create_project(conn, pid, title, f"/{pid}", status=status)
    assert len(models.list_projects(conn)) == 3
    assert len(models.list_projects(conn, status="active")) == 2
    assert len(models.list_projects(conn, status="paused")) == 1


def test_update_project(conn):
    """update_project changes priority and status in a single call."""
    models.create_project(conn, "x", "X", "/x", priority=5)
    result = models.update_project(conn, "x", priority=1, status="maintenance")
    assert result["status"] == "maintenance"
    assert result["priority"] == 1


def test_update_project_tech_stack_json(conn):
    """tech_stack survives a JSON round-trip when updated."""
    models.create_project(conn, "x", "X", "/x", tech_stack=["python"])
    result = models.update_project(conn, "x", tech_stack=["python", "fastapi"])
    assert result["tech_stack"] == ["python", "fastapi"]
# -- project_type and SSH fields (KIN-071) --
def test_create_operations_project(conn):
    """KIN-071: an operations project persists all four SSH fields."""
    ssh_fields = {
        "ssh_host": "10.0.0.1",
        "ssh_user": "root",
        "ssh_key_path": "~/.ssh/id_rsa",
        "ssh_proxy_jump": "jumpt",
    }
    project = models.create_project(
        conn, "srv1", "My Server", "",
        project_type="operations",
        **ssh_fields,
    )
    assert project["project_type"] == "operations"
    for field, expected in ssh_fields.items():
        assert project[field] == expected


def test_create_development_project_defaults(conn):
    """KIN-071: project_type defaults to 'development' with no SSH host."""
    project = models.create_project(conn, "devp", "Dev Project", "/path")
    assert project["project_type"] == "development"
    assert project["ssh_host"] is None


def test_update_project_ssh_fields(conn):
    """KIN-071: update_project may rewrite SSH connection fields."""
    models.create_project(conn, "srv2", "Server 2", "", project_type="operations")
    result = models.update_project(
        conn, "srv2", ssh_host="192.168.1.1", ssh_user="pelmen"
    )
    assert result["ssh_host"] == "192.168.1.1"
    assert result["ssh_user"] == "pelmen"
# ---------------------------------------------------------------------------
# KIN-ARCH-003 — path is nullable for operations projects.
# Fixes the old workaround of passing an empty string ("") as the path.
# ---------------------------------------------------------------------------
def test_kin_arch_003_operations_project_without_path_stores_null(conn):
    """KIN-ARCH-003: an operations project created without path stores NULL.

    Before the fix, callers had to pass path='' to satisfy the NOT NULL
    constraint; after the fix path=None (NULL in the DB) is valid for
    operations projects.
    """
    project = models.create_project(
        conn, "ops_null", "Ops Null Path",
        project_type="operations",
        ssh_host="10.0.0.1",
    )
    assert project["path"] is None, (
        "KIN-ARCH-003 регрессия: path должен быть NULL, а не пустой строкой"
    )


def test_kin_arch_003_check_constraint_rejects_null_path_for_development(conn):
    """KIN-ARCH-003: the CHECK constraint
    (path IS NOT NULL OR project_type='operations')
    rejects path=NULL for development projects."""
    import sqlite3 as _sqlite3
    with pytest.raises(_sqlite3.IntegrityError):
        models.create_project(
            conn, "dev_no_path", "Dev No Path",
            path=None, project_type="development",
        )
# -- validate_completion_mode (KIN-063) --
def test_validate_completion_mode_valid_auto_complete():
    """'auto_complete' passes validation unchanged."""
    assert models.validate_completion_mode("auto_complete") == "auto_complete"


def test_validate_completion_mode_valid_review():
    """'review' passes validation unchanged."""
    assert models.validate_completion_mode("review") == "review"


def test_validate_completion_mode_invalid_fallback():
    """Any unrecognised value falls back to 'review'."""
    for bad_value in ("auto", "", "unknown"):
        assert models.validate_completion_mode(bad_value) == "review"
# -- get_effective_mode (KIN-063) --
def test_get_effective_mode_task_overrides_project(conn):
    """A task-level execution_mode wins over the project-level one."""
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="review")
    models.create_task(conn, "P1-001", "p1", "Task", execution_mode="auto_complete")
    assert models.get_effective_mode(conn, "p1", "P1-001") == "auto_complete"


def test_get_effective_mode_falls_back_to_project(conn):
    """Without a task override, the project execution_mode applies."""
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="auto_complete")
    models.create_task(conn, "P1-001", "p1", "Task")  # execution_mode=None
    assert models.get_effective_mode(conn, "p1", "P1-001") == "auto_complete"


def test_get_effective_mode_project_review_overrides_default(conn):
    """Project mode 'review' with no task override resolves to 'review'.

    Scenario: the PM wanted auto_complete, but the project is configured for
    human review — get_effective_mode must return the project-level 'review'.
    """
    models.create_project(conn, "p1", "P1", "/p1", execution_mode="review")
    models.create_task(conn, "P1-001", "p1", "Task")  # no task-level override
    assert models.get_effective_mode(conn, "p1", "P1-001") == "review"
# -- Tasks --
def test_create_and_get_task(conn):
    """create_task stores id and brief, defaulting status to 'pending'."""
    models.create_project(conn, "p1", "P1", "/p1")
    task = models.create_task(conn, "P1-001", "p1", "Fix bug",
                              brief={"summary": "broken login"})
    assert task["id"] == "P1-001"
    assert task["status"] == "pending"
    assert task["brief"] == {"summary": "broken login"}


def test_list_tasks_filters(conn):
    """list_tasks filters by project_id and status, separately and combined."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_project(conn, "p2", "P2", "/p2")
    models.create_task(conn, "P1-001", "p1", "Task A", status="pending")
    models.create_task(conn, "P1-002", "p1", "Task B", status="done")
    models.create_task(conn, "P2-001", "p2", "Task C", status="pending")
    assert len(models.list_tasks(conn)) == 3
    assert len(models.list_tasks(conn, project_id="p1")) == 2
    assert len(models.list_tasks(conn, status="pending")) == 2
    assert len(models.list_tasks(conn, project_id="p1", status="done")) == 1


def test_update_task(conn):
    """update_task sets status and spec, and refreshes updated_at."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    result = models.update_task(conn, "P1-001", status="in_progress",
                                spec={"steps": [1, 2, 3]})
    assert result["status"] == "in_progress"
    assert result["spec"] == {"steps": [1, 2, 3]}
    assert result["updated_at"] is not None


def test_subtask(conn):
    """A child task records its parent_task_id."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Parent")
    child = models.create_task(conn, "P1-001a", "p1", "Child",
                               parent_task_id="P1-001")
    assert child["parent_task_id"] == "P1-001"
# -- Decisions --
def test_add_and_get_decisions(conn):
    """add_decision stores type and tags; get_decisions returns the row."""
    models.create_project(conn, "p1", "P1", "/p1")
    decision = models.add_decision(
        conn, "p1", "gotcha", "iOS Safari bottom sheet",
        "position:fixed breaks on iOS Safari",
        category="ui", tags=["ios-safari", "css"],
    )
    assert decision["type"] == "gotcha"
    assert decision["tags"] == ["ios-safari", "css"]
    assert len(models.get_decisions(conn, "p1")) == 1


def test_decisions_filter_by_category(conn):
    """get_decisions narrows results by category."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "Use WAL", "perf",
                        category="architecture")
    models.add_decision(conn, "p1", "gotcha", "Safari bug", "css",
                        category="ui")
    assert len(models.get_decisions(conn, "p1", category="ui")) == 1


def test_decisions_filter_by_tags(conn):
    """Tag filtering matches every decision carrying a requested tag."""
    models.create_project(conn, "p1", "P1", "/p1")
    for title, tags in (("Bug A", ["safari", "css"]),
                        ("Bug B", ["chrome", "js"]),
                        ("Bug C", ["safari", "js"])):
        models.add_decision(conn, "p1", "gotcha", title, "desc", tags=tags)
    assert len(models.get_decisions(conn, "p1", tags=["safari"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["js"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["css"])) == 1


def test_decisions_filter_by_types(conn):
    """Filtering by a list of types returns only matching rows."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "A", "a")
    models.add_decision(conn, "p1", "gotcha", "B", "b")
    models.add_decision(conn, "p1", "workaround", "C", "c")
    matched = models.get_decisions(conn, "p1", types=["gotcha", "workaround"])
    assert len(matched) == 2


def test_decisions_limit(conn):
    """The limit argument caps the number of returned decisions."""
    models.create_project(conn, "p1", "P1", "/p1")
    for i in range(10):
        models.add_decision(conn, "p1", "decision", f"D{i}", f"desc{i}")
    assert len(models.get_decisions(conn, "p1", limit=3)) == 3
# -- Modules --
def test_add_and_get_modules(conn):
    """add_module stores name and dependencies; get_modules lists them."""
    models.create_project(conn, "p1", "P1", "/p1")
    module = models.add_module(conn, "p1", "search", "frontend", "src/search/",
                               description="Search UI", dependencies=["auth"])
    assert module["name"] == "search"
    assert module["dependencies"] == ["auth"]
    assert len(models.get_modules(conn, "p1")) == 1
# -- Agent Logs --
def test_log_agent_run(conn):
    """log_agent_run records role and cost; success defaults to 1."""
    models.create_project(conn, "p1", "P1", "/p1")
    entry = models.log_agent_run(conn, "p1", "developer", "implement",
                                 tokens_used=5000, model="sonnet",
                                 cost_usd=0.015, duration_seconds=45)
    assert entry["agent_role"] == "developer"
    assert entry["cost_usd"] == 0.015
    assert entry["success"] == 1  # SQLite stores booleans as integers
# -- Pipelines --
def test_create_and_update_pipeline(conn):
    """A new pipeline starts 'running'; update_pipeline can complete it."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    pipeline = models.create_pipeline(conn, "P1-001", "p1", "feature",
                                      [{"step": "architect"}, {"step": "dev"}])
    assert pipeline["status"] == "running"
    assert pipeline["steps"] == [{"step": "architect"}, {"step": "dev"}]
    result = models.update_pipeline(conn, pipeline["id"], status="completed",
                                    total_cost_usd=0.05, total_tokens=10000)
    assert result["status"] == "completed"
    assert result["completed_at"] is not None
# -- Support --
def test_create_and_list_tickets(conn):
    """create_ticket stores source with status 'new'; list_tickets filters."""
    models.create_project(conn, "p1", "P1", "/p1")
    ticket = models.create_ticket(conn, "p1", "telegram_bot", "Не работает поиск",
                                  client_id="tg:12345", classification="bug")
    assert ticket["source"] == "telegram_bot"
    assert ticket["status"] == "new"
    assert len(models.list_tickets(conn, project_id="p1")) == 1
    assert len(models.list_tickets(conn, status="resolved")) == 0
# -- Statistics --
def test_project_summary(conn):
    """get_project_summary aggregates per-status task counts for a project."""
    models.create_project(conn, "p1", "P1", "/p1")
    for task_id, title, status in (("P1-001", "A", "done"),
                                   ("P1-002", "B", "in_progress"),
                                   ("P1-003", "C", "blocked")):
        models.create_task(conn, task_id, "p1", title, status=status)
    rows = models.get_project_summary(conn)
    assert len(rows) == 1
    row = rows[0]
    assert row["total_tasks"] == 3
    assert row["done_tasks"] == 1
    assert row["active_tasks"] == 1
    assert row["blocked_tasks"] == 1


def test_cost_summary(conn):
    """get_cost_summary sums cost, tokens and run count over the window."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.log_agent_run(conn, "p1", "dev", "implement",
                         cost_usd=0.10, tokens_used=5000)
    models.log_agent_run(conn, "p1", "reviewer", "review",
                         cost_usd=0.05, tokens_used=2000)
    rows = models.get_cost_summary(conn, days=1)
    assert len(rows) == 1
    assert rows[0]["total_cost_usd"] == pytest.approx(0.15)
    assert rows[0]["total_tokens"] == 7000
    assert rows[0]["runs"] == 2


def test_cost_summary_empty(conn):
    """A project with no agent runs yields an empty cost summary."""
    models.create_project(conn, "p1", "P1", "/p1")
    assert models.get_cost_summary(conn, days=7) == []
# -- add_decision_if_new --
def test_add_decision_if_new_adds_new_decision(conn):
    """A previously unseen title is inserted and returned."""
    models.create_project(conn, "p1", "P1", "/p1")
    decision = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "description")
    assert decision is not None
    assert decision["title"] == "Use WAL mode"
    assert decision["type"] == "gotcha"


def test_add_decision_if_new_skips_exact_duplicate(conn):
    """An exact (type, title) duplicate returns None and is not stored."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "Use WAL mode", "desc1")
    assert models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "desc2") is None
    # the existing decision must not be duplicated
    assert len(models.get_decisions(conn, "p1")) == 1


def test_add_decision_if_new_skips_case_insensitive_duplicate(conn):
    """Titles differing only in letter case count as duplicates."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "Use UUID for task IDs", "desc")
    assert models.add_decision_if_new(conn, "p1", "decision", "use uuid for task ids", "other desc") is None
    assert len(models.get_decisions(conn, "p1")) == 1


def test_add_decision_if_new_allows_same_title_different_type(conn):
    """The same title under a different type is a distinct decision."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "SQLite WAL", "desc")
    assert models.add_decision_if_new(conn, "p1", "convention", "SQLite WAL", "other desc") is not None
    assert len(models.get_decisions(conn, "p1")) == 2


def test_add_decision_if_new_skips_whitespace_duplicate(conn):
    """Surrounding whitespace in the title still counts as a duplicate."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "convention", "Run tests after each change", "desc")
    assert models.add_decision_if_new(conn, "p1", "convention", " Run tests after each change ", "desc2") is None
    assert len(models.get_decisions(conn, "p1")) == 1
# -- next_task_id (KIN-OBS-009) --
def test_next_task_id_with_category_first(conn):
    """First task with category='SEC' -> 'VDOL-SEC-001'."""
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    assert models.next_task_id(conn, "vdol", category="SEC") == "VDOL-SEC-001"


def test_next_task_id_with_category_increments(conn):
    """Second task with category='SEC' -> 'VDOL-SEC-002'."""
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    models.create_task(conn, "VDOL-SEC-001", "vdol", "Task 1", category="SEC")
    assert models.next_task_id(conn, "vdol", category="SEC") == "VDOL-SEC-002"


def test_next_task_id_category_counters_independent(conn):
    """Category counters are independent: SEC-002 does not affect UI-001."""
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    models.create_task(conn, "VDOL-SEC-001", "vdol", "Sec Task 1", category="SEC")
    models.create_task(conn, "VDOL-SEC-002", "vdol", "Sec Task 2", category="SEC")
    assert models.next_task_id(conn, "vdol", category="UI") == "VDOL-UI-001"


def test_next_task_id_without_category_backward_compat(conn):
    """No category -> plain 'VDOL-001' (backward compatibility)."""
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    assert models.next_task_id(conn, "vdol") == "VDOL-001"


def test_next_task_id_mixed_formats_no_collision(conn):
    """Legacy and category id counters never collide within one project."""
    models.create_project(conn, "kin", "KIN", "/kin")
    models.create_task(conn, "KIN-001", "kin", "Old style task")
    models.create_task(conn, "KIN-002", "kin", "Old style task 2")
    # the category format does not disturb the legacy counter
    assert models.next_task_id(conn, "kin", category="OBS") == "KIN-OBS-001"
    # and the legacy format does not disturb the category counter
    assert models.next_task_id(conn, "kin") == "KIN-003"
# -- Obsidian sync regex (KIN-OBS-009, решение #75) --
_OBSIDIAN_TASK_PATTERN = re.compile(
r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-(?:[A-Z][A-Z0-9]*-)?\d+)\s+(.+)$"
)
def test_obsidian_regex_matches_old_format():
    """The legacy id format (KIN-001) is recognised."""
    match = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-001 Fix login bug")
    assert match is not None
    assert match.group(2) == "KIN-001"


def test_obsidian_regex_matches_new_format():
    """The category id format (VDOL-SEC-001) is recognised."""
    match = _OBSIDIAN_TASK_PATTERN.match("- [ ] VDOL-SEC-001 Security audit")
    assert match is not None
    assert match.group(2) == "VDOL-SEC-001"


def test_obsidian_regex_matches_obs_format():
    """The id of this very feature (KIN-OBS-009) is recognised."""
    match = _OBSIDIAN_TASK_PATTERN.match("* [X] KIN-OBS-009 Task ID по категориям")
    assert match is not None
    assert match.group(2) == "KIN-OBS-009"
def test_obsidian_regex_no_match_lowercase():
    """A lowercase project prefix is not a valid task id."""
    assert _OBSIDIAN_TASK_PATTERN.match("- [x] proj-001 lowercase id") is None


def test_obsidian_regex_no_match_numeric_prefix():
    """A numeric prefix is not a valid task id."""
    assert _OBSIDIAN_TASK_PATTERN.match("- [x] 123-abc invalid format") is None
def test_obsidian_regex_done_state():
    """The checkbox group distinguishes done ('x') from pending (' ').

    Fix: dropped the unused ``conn`` fixture — this test exercises only the
    regex and should not create an in-memory database per run.
    """
    m_done = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-UI-003 Done task")
    m_pending = _OBSIDIAN_TASK_PATTERN.match("- [ ] KIN-UI-004 Pending task")
    assert m_done.group(1) == "x"
    assert m_pending.group(1) == " "
# -- next_task_id across all 12 categories (KIN-OBS-009) --
@pytest.mark.parametrize("cat", TASK_CATEGORIES)
def test_next_task_id_all_categories_generate_correct_format(conn, cat):
    """Every registered category yields an id of the form PROJ-CAT-001."""
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    assert models.next_task_id(conn, "vdol", category=cat) == f"VDOL-{cat}-001"
# -- update_task(category=...) must not clobber brief (KIN-OBS-009, decision #74) --
def test_update_task_category_preserves_brief(conn):
    """Setting category leaves the existing brief untouched."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task", brief={"summary": "important context"})
    result = models.update_task(conn, "P1-001", category="SEC")
    assert result["category"] == "SEC"
    assert result["brief"] == {"summary": "important context"}


def test_update_task_category_preserves_status_and_priority(conn):
    """Setting category leaves status and priority untouched."""
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task", status="in_progress", priority=3)
    result = models.update_task(conn, "P1-001", category="UI")
    assert result["category"] == "UI"
    assert result["status"] == "in_progress"
    assert result["priority"] == 3