diff --git a/.gitignore b/.gitignore index ab3e8ce..d06cd30 100644 --- a/.gitignore +++ b/.gitignore @@ -162,3 +162,10 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ +# Kin +kin.db +kin.db-wal +kin.db-shm +PROGRESS.md +node_modules/ +web/frontend/dist/ diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..b5ef99b --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,19 @@ +# Kin — мультиагентный оркестратор проектов + +## Что это +Виртуальная софтверная компания. Intake → PM → специалисты. +Каждый агент = отдельный Claude Code процесс с изолированным контекстом. + +## Стек +Python 3.11+, SQLite, FastAPI (будущее), Vue 3 (GUI, будущее) + +## Архитектура +Полная спека: DESIGN.md + +## Правила +- НЕ создавать файлы без необходимости +- Коммитить после каждого рабочего этапа +- SQLite kin.db — единственный source of truth +- Промпты агентов в agents/prompts/*.md +- Тесты обязательны для core/ +- Общие инструкции: ~/projects/CLAUDE.md diff --git a/DESIGN.md b/DESIGN.md new file mode 100644 index 0000000..8d7aff2 --- /dev/null +++ b/DESIGN.md @@ -0,0 +1,1291 @@ +# Исследование мультиагентных оркестраторов и проект собственного + +## Дата: 15 марта 2026 +## Проект: Kin — виртуальная софтверная компания + +--- + +## ЧАСТЬ 1: Анализ Ruflo (ex Claude Flow) + +### 1.1 Общие сведения + +- **Репо**: github.com/ruvnet/ruflo +- **Автор**: ruvnet (один разработчик) +- **Звёзды**: ~20K, форки: ~2.3K +- **Коммиты**: 5,800+, 55 alpha-итераций +- **Текущая версия**: v3.5.0 (февраль 2026) — первый "стабильный" релиз +- **Стек**: TypeScript/Node.js, WASM (Rust) для policy engine и embeddings +- **Пакеты**: @claude-flow/cli, claude-flow, ruflo — все три являются обёртками над @claude-flow/cli + +### 1.2 Архитектура (заявленная) + +``` +User → Ruflo CLI/MCP → Router → Swarm → Agents → Memory → LLM Providers + ↑ ↓ + └──────────────── Learning Loop ←────────────────────────┘ +``` + +**Ключевые компоненты:** +- 
**CLI/MCP интерфейс** — входная точка, 175+ MCP-тулов +- **Router** — маршрутизация задач к агентам, автоматический выбор модели (haiku/sonnet/opus) +- **Swarm Manager** — управление топологией (hierarchical, mesh, ring, star) +- **Agent System** — 60+ предопределённых агентов в 16 категориях +- **Memory** — SQLite (.swarm/memory.db), ReasoningBank с hash-based embeddings +- **Hive Mind** — queen/worker иерархия с "consensus" протоколами + +### 1.3 Что РЕАЛЬНО работает (по issue tracker и коду) + +**Механизм запуска агентов:** +- Ключевая функция: `launchClaudeCodeWithSwarm` в `src/cli/commands/swarm-new.ts` +- ЧТО ОНА ДЕЛАЕТ: формирует гигантский `swarmPrompt` (текстовую строку) и передаёт его в `claude` CLI +- ПО СУТИ: это prompt engineering — агенты "существуют" как инструкции в промпте одного Claude Code +- Файлы генерируются прямо в корень проекта (issue #398 — нет контроля output directory) + +**Критический баг (issue #955):** +- `--claude` флаг в `hive-mind spawn` ДОКУМЕНТИРОВАН, но НЕ РЕАЛИЗОВАН +- Команда спавнит "worker agent" (запись в БД), но НЕ запускает Claude Code +- Флаг молча игнорируется + +**Memory система:** +- SQLite-based, работает +- ReasoningBank использует hash-based embeddings (не требует API) — быстрые, но примитивные +- 2-3ms latency для поиска — это хорошо +- Persistent между сессиями + +**GUI:** +- Web UI на порту 3000 (WebSocket) +- Терминальный эмулятор в браузере +- @liamhelmer/claude-flow-ui — отдельный npm-пакет для UI + +### 1.4 Что МАРКЕТИНГ vs РЕАЛЬНОСТЬ + +| Заявлено | Реальность | +|----------|-----------| +| 60+ специализированных агентов | Папки с CLAUDE.md промптами в .agents/skills/, по сути — шаблоны для system prompt | +| Byzantine consensus | Протокол описан, но по issue tracker - не используется в реальных сценариях | +| Neural pattern recognition | Hash-based embeddings + паттерн-матчинг, не нейросеть | +| 127 параллельных агентов (из issue #125) | Это wishlist/epic, не реализация. 
Реально — один claude процесс с большим промптом | +| Self-learning | Запись success/failure в SQLite + маршрутизация на основе прошлых результатов. Работает, но это не ML | +| WASM SIMD acceleration | Rust-based WASM для embeddings — реально работает, даёт скорость | +| Hive Mind | Queen = координирующий промпт, Workers = записи в БД, НЕ отдельные Claude процессы | + +### 1.5 Что ВЗЯТЬ из Ruflo + +**Сильные стороны (стоит перенять):** + +1. **MCP-интеграция** — агент-оркестратор как MCP-сервер для Claude Code. Это позволяет Claude Code вызывать оркестратор как тулзу, а не наоборот. Элегантно. + +2. **SQLite memory с namespace** — простая, надёжная, persistent. Key-value с namespace (architecture/, bugs/, decisions/) — хороший паттерн. + +3. **Model routing** — автоматический выбор haiku для простых задач, opus для сложных. Экономит деньги. + +4. **Anti-drift defaults** — `topology: hierarchical` + `maxAgents: 8` + `strategy: specialized`. Маленькие команды с чёткими ролями дрифтят меньше. + +5. **ADR (Architecture Decision Records)** — spec-first подход, где архитектура описана в ADR-файлах и агенты обязаны следовать. + +6. **Hook system** — pre-task/post-task хуки для автоматизации (pre: загрузить контекст, post: сохранить результат). + +7. **Структура .agents/skills/** — каждый агент = директория с CLAUDE.md (system prompt) + tools + config. Модульно, расширяемо. + +**Слабые стороны (НЕ повторять):** + +1. **Fake parallelism** — агенты не являются отдельными процессами. Это один Claude Code с большим промптом, притворяющимся несколькими агентами. ГЛАВНАЯ ПРОБЛЕМА — контекст всё равно один, compaction убивает всех одновременно. + +2. **Over-engineering** — 215 MCP tools, 87 в wiki, byzantine consensus — для одного разработчика это красные флаги. Много surface area, мало глубины. + +3. **Размытие ответственности пакетов** — 3 npm-пакета (@claude-flow/cli, claude-flow, ruflo) — это один и тот же код. Ребрендинг создаёт путаницу. + +4. 
**Нет реальной изоляции контекста** — это ключевое. Если агент-программист и агент-тестировщик живут в одном контексте, compaction убивает нюансы обоих. + +5. **Документация как продукт** — README и Wiki описывают фичи, которые не реализованы (issue #955). Доверять нельзя. + +--- + +## ЧАСТЬ 2: Анализ других фреймворков (ключевые выводы) + +### 2.1 CrewAI (Python) + +- **Модель**: Role-based crews + Event-driven Flows +- **Сильное**: Быстрое прототипирование, MCP + A2A поддержка, 44K+ GitHub stars +- **Слабое**: Ограниченный checkpointing, роли через prompt engineering (не настоящая изоляция) +- **Для нас**: Flows (event-driven оркестрация поверх crews) — хорошая концепция + +### 2.2 LangGraph (Python) + +- **Модель**: Directed graph с conditional edges, checkpointing +- **Сильное**: Лучшая в индустрии persistence/state, time-travel debug +- **Слабое**: Высокий порог входа, привязка к LangChain экосистеме +- **Для нас**: Концепция checkpointing + graph-based flow + +### 2.3 Claude Agent SDK + +- **Модель**: Tool-use chain с sub-agents +- **Сильное**: Нативная интеграция с Claude, safety-first +- **Слабое**: Только Claude модели, лёгкий на оркестрацию +- **Для нас**: Lifecycle hooks + +### 2.4 Нативные субагенты Claude Code + +- **Как работают**: `claude -p "task" --session-id "id"` запускает ОТДЕЛЬНЫЙ процесс +- **КЛЮЧЕВОЕ**: Это РЕАЛЬНАЯ изоляция контекста! Каждый субагент — отдельное контекстное окно +- **Ограничение**: Нет координации "из коробки", нет shared memory, нет PM-слоя + +--- + +## ЧАСТЬ 3: Архитектура Kin (наш проект) + +### 3.1 Ключевой принцип + +> **Каждый агент = отдельный Claude Code процесс с изолированным контекстом.** +> Compaction в рамках одного агента не убивает нюансы, потому что его контекст маленький и специализированный. +> PM-агент держит мета-уровень, а не весь код. 
+ +### 3.2 Живая иерархия с динамической маршрутизацией + +Не pipeline (A→B→C→D), а **живая организация**: каждый уровень понимает +свою зону ответственности и собирает нужную команду под задачу. + +``` +[Ты (Михаил)] ← человеческая речь, свободная форма: + │ "клиент пишет что фильтры глючат на айфоне когда быстро тыкаешь" + │ "нужен агрегатор туров, чтобы парсить предложения" + │ "что у меня горит?" + │ "посмотри что там с NeverDNS, давно не трогали" + ▼ +[Intake-менеджер] — УМНЫЙ агент (Sonnet), НЕ код на Python. + │ Почему агент, а не код: + │ - Клиент пишет "фильтры глючат на айфоне когда быстро тыкаешь" + │ → код не поймёт нюанс "быстро тыкаешь" = race condition + │ → агент переформулирует для команды + │ - Ты пишешь "посмотри что там с NeverDNS" + │ → это не задача, это запрос на статус + возможно ревью + │ → агент разберётся + │ + │ Что делает: + │ 1. Понимает контекст (ты — директор и продажник, клиенты через тебя) + │ 2. Определяет проект, тип задачи, срочность + │ 3. Задаёт уточняющие вопросы если надо + │ 4. Формулирует задачу на языке команды + │ 5. Для простых запросов (статус) — SQL к БД, без агентов + │ 6. Для задач — маршрутизирует к нужному PM проекта + │ 7. Для новых проектов — запускает цепочку research → design → architecture + │ + │ Его контекст: список проектов + статусы (из БД, маленький) + │ НЕ знает: код, архитектуру, детали — только "кто чем занимается" + │ + ├── [PM:vdolipoperek] ── знает проект ГЛУБОКО + │ │ Что знает: модули, tech stack, decisions, грабли, текущий статус + │ │ Что умеет: декомпозировать задачу, выбрать нужных специалистов + │ │ Его контекст: decisions + modules + текущие tasks (из БД) + │ │ + │ │ Intake передаёт: "Баг: фильтры поиска не применяются при + │ │ быстром переключении на iOS Safari. + │ │ Источник: жалоба клиента. Приоритет: высокий." + │ │ + │ │ PM думает: "фильтры — это модуль search. iOS Safari — + │ │ у нас уже была decision #15 про position:fixed. + │ │ Нужен дебагер на модуль search." 
+ │ │ + │ │ ┌─── [Дебагер] ← описание бага + код модуля + decision #15 + │ │ │ │ ищет проблему + │ │ │ │ нашёл: "race condition в async фильтре" + │ │ │ ▼ + │ │ │ [Тестировщик] ← найденный баг + модуль + │ │ │ │ regression test, подтверждает баг + │ │ │ ▼ + │ │ │ [Фронтендер] ← баг + тест + spec модуля + │ │ │ │ фиксит, тест проходит + │ │ │ ▼ + │ │ └─── [PM] ← результат. Записывает decision: + │ │ "race condition в SearchFilters — debounce + AbortController" + │ │ → Intake сообщает тебе → ты сообщаешь клиенту + │ │ + │ │ Другой пример: "добавить оплату на сайт" + │ │ PM: "новый модуль, нужна полная команда" + │ │ + │ │ ┌─── [Маркетолог] ← "платежи на сайте турагентства" + │ │ │ │ исследует: как конкуренты делают checkout, + │ │ │ │ какие conversion-паттерны, trust signals + │ │ │ ▼ + │ │ │ [UX-дизайнер] ← research маркетолога + brief + │ │ │ │ проектирует: user flow оплаты, wireframes + │ │ │ ▼ + │ │ │ [Архитектор] ← UX flow + brief + все decisions проекта + │ │ │ │ spec: модули, API, БД, интеграция с платёжкой + │ │ │ ▼ + │ │ │ [Безопасник] ← spec (PCI DSS для платежей!) + │ │ │ │ security requirements + │ │ │ ▼ + │ │ │ [Бэкендер] ← spec + security reqs (параллельно!) + │ │ │ [Фронтендер] ← spec + UX wireframes + │ │ │ ▼ + │ │ │ [Ревьюер] + [Тестировщик] + [Безопасник] + │ │ └─── [PM] ← всё готово, decisions обновлены + │ │ + ├── [PM:sharedbox] ── знает свой проект так же глубоко + │ └── (своя динамическая команда) + │ + ├── [PM:neverdns] ── знает: готов, в маркетинг-фазе + │ └── (маркетолог, копирайтер, SEO — другая команда!) + │ + └── ... 
(остальные проекты) + + +[Для НОВЫХ проектов — отдельная цепочка:] + + Intake: "нужен агрегатор туров" + │ + ├── [Бизнес-аналитик] ← хотелки + контекст (турагентство) + │ │ исследует: бизнес-модель, монетизация, целевая аудитория + │ │ может спавнить: + │ │ [Исследователь рынка] ← конкуренты, ниша + │ │ [Исследователь API] ← какие API поставщиков туров есть + │ │ [Исследователь юридики] ← лицензии, договора + │ ▼ + ├── [UX-дизайнер] ← research + хотелки + │ │ user journey, wireframes ключевых страниц + │ │ смотрит конкурентов, лучшие практики + │ ▼ + ├── [Маркетолог] ← research + UX + │ │ стратегия продвижения, SEO, механики удержания + │ │ что учесть при разработке для маркетинга + │ ▼ + ├── [Архитектор] ← research + UX + marketing reqs + │ │ project_blueprint: модули, tech stack, план + │ │ учитывает существующий стек (Vue/Nuxt) + │ ▼ + └── Создаётся проект в БД → назначается PM → работа начинается +``` + +### 3.3 Типы задач и маршруты (PM выбирает динамически) + +PM проекта — это не тупой маршрутизатор, это агент, который ПОНИМАЕТ задачу. 
+Но чтобы понимал хорошо, ему нужны "шаблоны маршрутов" как подсказки: + +```yaml +# В промпте PM: "ты знаешь эти типы задач и кого вызывать" + +routes: + debug: + description: "Найти и исправить баг" + typical_flow: + - debugger: "найди причину, опиши" + - tester: "напиши regression test, подтверди баг" + - developer: "исправь, тест должен пройти" + pm_decides: + - какой модуль затронут (из знания проекта) + - frontend или backend баг + - нужен ли security review (если баг в auth/payments) + + feature: + description: "Новая фича" + typical_flow: + - architect: "спроектируй" + - developer: "реализуй" (может быть несколько параллельно) + - reviewer: "проверь" + - tester: "протестируй" + pm_decides: + - масштаб (один компонент или новый модуль) + - нужен ли architect (мелкая фича → сразу developer) + - параллелить ли frontend/backend + + refactor: + description: "Рефакторинг существующего кода" + typical_flow: + - architect: "оцени scope, предложи план" + - developer: "рефактори по плану" + - tester: "прогони существующие тесты" + pm_decides: + - затрагивает ли другие модули + - нужна ли миграция данных + + security_audit: + description: "Проверка безопасности" + typical_flow: + - security: "проверь по OWASP" + - developer: "исправь найденное" + - security: "подтверди исправления" + + new_project: + description: "Создание нового проекта с нуля" + typical_flow: + - analyst: "исследуй рынок, конкурентов, API" + - architect: "спроектируй на основе исследования" + - pm: "декомпозируй blueprint на задачи" + - # далее — обычные feature/debug задачи + + hotfix: + description: "Срочное исправление в продакшене" + typical_flow: + - debugger: "найди причину" + - developer: "минимальный fix" + - tester: "smoke test" + constraints: + - максимум 1 час + - минимум изменений + - deploy сразу +``` + +### 3.4 Пул специалистов (агенты-рабочие) + +Рабочие агенты — НЕ фиксированный набор. Это **пул ролей**, из которых PM +собирает команду под задачу. 
Каждый — отдельный Claude Code процесс. + +```yaml +specialists: + + # ═══════════════════════════════════════════════ + # ИССЛЕДОВАНИЯ И АНАЛИТИКА + # ═══════════════════════════════════════════════ + + business_analyst: + prompt: prompts/business_analyst.md + context: "задание + бизнес-контекст проекта" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # стратегические решения + description: > + Бизнес-аналитик. Исследует бизнес-модель, монетизацию, целевую + аудиторию, юридические аспекты. Может спавнить исследователей. + + market_researcher: + prompt: prompts/market_researcher.md + context: "тема исследования + рамки" + tools: [WebSearch, WebFetch, Write] + model: sonnet + description: > + Исследователь рынка. Конкуренты, ниша, тренды, ценообразование. + Подчинённый аналитика — копает конкретную тему. + + tech_researcher: + prompt: prompts/tech_researcher.md + context: "что исследовать + ограничения" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Технический исследователь. API поставщиков, библиотеки, + интеграции, бенчмарки. Знает где искать доки, changelog, issues. + + # ═══════════════════════════════════════════════ + # ДИЗАЙН И UX + # ═══════════════════════════════════════════════ + + ux_designer: + prompt: prompts/ux_designer.md + context: "brief + research + примеры конкурентов" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # UX-решения критичны для продукта + description: > + UX-дизайнер. User journey, wireframes (текстовые/Mermaid), + information architecture, interaction patterns. + Смотрит на конкурентов, лучшие практики, accessibility. + + ui_designer: + prompt: prompts/ui_designer.md + context: "wireframes + style guide проекта" + tools: [Read, Write] + model: sonnet + description: > + UI-дизайнер. Визуальный дизайн, компонентная система, + типографика, цвета, spacing. Описывает на уровне CSS tokens. 
+ + # ═══════════════════════════════════════════════ + # МАРКЕТИНГ И КОНТЕНТ + # ═══════════════════════════════════════════════ + + marketer: + prompt: prompts/marketer.md + context: "research + продукт + целевая аудитория" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Маркетолог. Стратегия продвижения, SEO-требования для разработки, + conversion-паттерны, A/B тест гипотезы, trust signals. + Знает исследования по поведению пользователей. + Даёт требования разработчикам: что учесть в коде для маркетинга. + + copywriter: + prompt: prompts/copywriter.md + context: "brief + tone of voice + целевая аудитория" + tools: [Read, Write] + model: sonnet + description: > + Копирайтер. Тексты для UI (кнопки, заголовки, ошибки), + лендинги, описания, meta-теги. Знает русский и английский. + + seo_specialist: + prompt: prompts/seo_specialist.md + context: "сайт + ниша + текущие метрики (если есть)" + tools: [WebSearch, WebFetch, Read, Write, Bash] + model: sonnet + description: > + SEO-специалист. Техническое SEO, структура URL, meta-теги, + schema.org разметка, Core Web Vitals, sitemap. + Даёт конкретные требования фронтендеру и бэкендеру. + + # ═══════════════════════════════════════════════ + # ПРОЕКТИРОВАНИЕ + # ═══════════════════════════════════════════════ + + architect: + prompt: prompts/architect.md + context: "brief + ВСЕ decisions проекта + tech_stack + research (если есть)" + tools: [Read, Write] + model: opus # критические решения — максимум мозгов + description: > + Системный архитектор. Проектирует архитектуру, модули, API, + схему БД, интеграции. Выдаёт implementation spec. + Не пишет код — пишет спецификации. + + db_architect: + prompt: prompts/db_architect.md + context: "spec + текущая схема БД" + tools: [Read, Write] + model: opus + description: > + Архитектор БД. Схема, миграции, индексы, нормализация. + Когда SQLite хватит, когда переходить на PostgreSQL. 
+ + # ═══════════════════════════════════════════════ + # РАЗРАБОТКА + # ═══════════════════════════════════════════════ + + frontend_dev: + prompt: prompts/frontend_dev.md + context: "spec модуля + wireframes + relevant decisions (gotchas)" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Фронтендер. Vue/Nuxt/React, CSS, анимации, responsive. + Работает в директории проекта. + + backend_dev: + prompt: prompts/backend_dev.md + context: "spec модуля + API contracts + relevant decisions" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Бэкендер. Node.js/Python, API, интеграции, бизнес-логика. + + fullstack_dev: + prompt: prompts/fullstack_dev.md + context: "spec модуля + relevant decisions" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Фулстекер. Для мелких задач, где нет смысла делить. + + # ═══════════════════════════════════════════════ + # КАЧЕСТВО + # ═══════════════════════════════════════════════ + + debugger: + prompt: prompts/debugger.md + context: "описание бага + код модуля + логи (если есть)" + tools: [Read, Bash, Write] # НЕТ Edit! Дебагер ищет, не чинит. + model: opus # дебаг требует глубокого reasoning + description: > + Дебагер. Ищет причину бага, описывает root cause, НЕ исправляет. + Предлагает решение, но не трогает код. + + reviewer: + prompt: prompts/reviewer.md + context: "код + spec + conventions проекта" + tools: [Read] # ТОЛЬКО чтение! Ревьюер не правит. + model: sonnet + description: > + Ревьюер. Code review: соответствие spec, качество, паттерны, + naming, edge cases. Только читает, не правит. + + tester: + prompt: prompts/tester.md + context: "код + spec" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Тестировщик. Unit, integration, e2e тесты. Гоняет, ищет edge cases. 
+ + qa_analyst: + prompt: prompts/qa_analyst.md + context: "spec + UX flow + текущие тесты" + tools: [Read, Write] + model: sonnet + description: > + QA-аналитик. Тест-планы, тест-кейсы, acceptance criteria. + Не пишет код тестов — описывает ЧТО тестировать. + + # ═══════════════════════════════════════════════ + # ИНФРАСТРУКТУРА И БЕЗОПАСНОСТЬ + # ═══════════════════════════════════════════════ + + sysadmin: + prompt: prompts/sysadmin.md + context: "инфраструктура проекта + текущий стек" + tools: [Read, Write, Edit, Bash] + model: sonnet + description: > + Сисадмин. Docker, nginx, CI/CD, мониторинг, бэкапы. + Знает когда SQLite хватит и когда нужен PostgreSQL. + Настраивает фаерволы, SSL, деплой. Ставит пакеты/модули. + + devops: + prompt: prompts/devops.md + context: "инфраструктура + pipeline + tech stack" + tools: [Read, Write, Edit, Bash] + model: sonnet + description: > + DevOps. CI/CD pipeline, автодеплой, blue-green, rollback. + Docker Compose, GitHub Actions / Forgejo CI. + + security: + prompt: prompts/security.md + context: "код + security-relevant decisions" + tools: [Read, Bash] + model: opus # безопасность — не экономим + description: > + Безопасник. OWASP, CVE, auth, injection, secrets, dependencies. + Проверяет фаерволы, ставит ограничения. Знает актуальные уязвимости. + + # ═══════════════════════════════════════════════ + # ЮРИДИЧЕСКАЯ ПОДДЕРЖКА + # ═══════════════════════════════════════════════ + + legal: + prompt: prompts/legal.md + context: "описание задачи/модуля + юрисдикция + тип бизнеса" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # юридические решения критичны + description: > + Юрист. Анализирует задачу с точки зрения законности: + - Можно ли так делать? (ЗоЗПП, 152-ФЗ, 54-ФЗ, GDPR...) + - Что нужно сделать чтобы было можно? (оферта, согласие, лицензия) + - Какие документы нужны? (политика конфиденциальности, договор) + - Какие риски? 
(штрафы, блокировки, претензии) + НЕ заменяет настоящего юриста — даёт направление и чеклист. + PM вызывает когда: коммерция, персональные данные, платежи, + пользовательский контент, трансграничные операции. + PM НЕ вызывает когда: внутренний инструмент без юрлица. + + legal_researcher: + prompt: prompts/legal_researcher.md + context: "юридический вопрос + юрисдикция" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Юридический исследователь. Ищет актуальные нормативные акты, + судебную практику, разъяснения регуляторов. + Подчинённый юриста — копает конкретный вопрос. + + # ═══════════════════════════════════════════════ + # САППОРТ И ОБРАТНАЯ СВЯЗЬ + # ═══════════════════════════════════════════════ + + support: + prompt: prompts/support.md + context: "описание продукта + FAQ + known issues + decisions (gotchas)" + tools: [Read, Write] + model: sonnet + description: > + Саппорт-агент. Общается с пользователем (через тебя или напрямую). + Задаёт правильные вопросы, собирает анамнез: + - Что именно не работает? На каком устройстве/браузере? + - Воспроизводится ли стабильно? Когда началось? + - Скриншот/видео? + Формирует структурированный тикет для PM. + НЕ обещает сроки, НЕ принимает решения, НЕ выполняет просьбы. + + support_guard: + prompt: prompts/support_guard.md + context: "бизнес-правила проекта + security policies" + tools: [Read] + model: sonnet + description: > + Фильтр саппорта (безопасник обратной связи). 
+ Проверяет ВСЕ входящие от клиентов перед тем как они попадут в систему: + - "Дайте мне данные других пользователей" → REJECT + лог + - "Сделайте скидку 90%" → REJECT (не в компетенции системы) + - "Удалите мой аккаунт" → ESCALATE to human (Михаил решает) + - "Кнопка не работает" → PASS to support → PM + Классифицирует: bug / feature_request / question / abuse / escalate +``` + +### 3.4a Саппорт: от ручного к автоматическому (эволюция) + +``` +=== ФАЗА 1: Саппорт через тебя (сейчас) === + +[Клиент] → пишет тебе в WhatsApp/Telegram + │ + ▼ +[Ты] → пересказываешь Intake-менеджеру: + │ "клиент пишет что фильтры глючат на айфоне" + ▼ +[Intake] → формулирует → [PM проекта] → команда работает + │ + ▼ +[PM] → результат → [Intake] → тебе → ты отвечаешь клиенту + + +=== ФАЗА 2: Саппорт-агент общается с тобой (скоро) === + +[Клиент] → пишет тебе + │ + ▼ +[Ты] → копируешь сообщение клиента в kin: + │ kin support vdol "текст клиента" + ▼ +[Support] → задаёт тебе уточняющие вопросы: + │ "Спросите клиента: на каком устройстве? В каком браузере? + │ Воспроизводится ли если обновить страницу?" + ▼ +[Ты] → спрашиваешь клиента → добавляешь ответы + ▼ +[Support] → формирует тикет → [Support Guard проверяет] → [PM] + │ + ▼ +[PM] → результат → [Support] формулирует ответ клиенту + → "Мы нашли и исправили проблему с фильтрами. + Обновите страницу — должно работать." 
+ ▼ +[Ты] → отправляешь клиенту (можешь отредактировать) + + +=== ФАЗА 3: Telegram-бот для клиентов (перспектива) === + +[Клиент] → пишет в Telegram-бот проекта напрямую + │ + ▼ +[Support Guard] → фильтрует: + │ abuse/manipulation → BLOCK + лог для тебя + │ escalation → NOTIFY тебя + │ нормальный запрос → PASS + ▼ +[Support Bot] → общается с клиентом: + │ задаёт вопросы, собирает анамнез, показывает FAQ + │ если FAQ решает проблему → закрывает + │ если нет → формирует тикет + ▼ +[PM] → принимает тикет, запускает команду + ▼ +[PM] → результат → [Support Bot] → отвечает клиенту + │ + │ ВСЕ ответы клиентам проходят через Support Guard: + │ - не раскрывает внутреннюю архитектуру + │ - не обещает невозможное + │ - не подтверждает уязвимости + │ - вежливо, в стиле бренда + ▼ +[Ты] → получаешь summary: "Клиент X обратился с багом Y, + команда исправила, клиент получил ответ Z" + (можешь вмешаться в любой момент) + + +=== ФАЗА 4: Проект живёт сам (далёкая перспектива) === + +[Клиенты] → боты → [Support] → [PM] → [Команда] → [Deploy] + │ │ + ▼ ▼ +[Аналитика использования] [Автодеплой фиксов] + │ + ▼ +[Маркетолог] → "конверсия упала на 5% на странице X" + → [PM] → [UX-дизайнер] → [Фронтендер] → [A/B тест] + +[Ты] = стратегическое управление + финальное approve на крупные изменения +``` + +**Таблицы для саппорта (добавить в БД):** + +```sql +-- Тикеты от пользователей +CREATE TABLE support_tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + source TEXT NOT NULL, -- 'manual', 'telegram_bot', 'email' + client_id TEXT, -- идентификатор клиента (telegram id, email...) 
+ client_message TEXT NOT NULL, -- исходное сообщение клиента + classification TEXT, -- 'bug', 'feature_request', 'question', 'abuse', 'escalate' + guard_result TEXT, -- 'pass', 'reject', 'escalate' + guard_reason TEXT, -- почему отклонено/эскалировано + anamnesis JSON, -- собранная информация (устройство, шаги, скриншоты) + task_id TEXT REFERENCES tasks(id), -- связанная задача (если создана) + response TEXT, -- ответ клиенту + response_approved BOOLEAN DEFAULT FALSE, -- ты одобрил ответ? + status TEXT DEFAULT 'new', -- new, collecting_info, in_progress, resolved, rejected + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + resolved_at DATETIME +); + +-- Настройки бота для каждого проекта +CREATE TABLE support_bot_config ( + project_id TEXT PRIMARY KEY REFERENCES projects(id), + telegram_bot_token TEXT, -- токен Telegram-бота (encrypted) + welcome_message TEXT, -- приветствие + faq JSON, -- часто задаваемые вопросы + auto_reply BOOLEAN DEFAULT FALSE, -- автоматически отвечать клиентам? + require_approval BOOLEAN DEFAULT TRUE, -- требовать одобрение ответов? 
+ brand_voice TEXT, -- стиль общения ("формальный", "дружелюбный") + forbidden_topics JSON, -- что нельзя обсуждать с клиентами + escalation_keywords JSON -- триггеры для эскалации к тебе +); + +CREATE INDEX idx_tickets_project ON support_tickets(project_id, status); +CREATE INDEX idx_tickets_client ON support_tickets(client_id); +``` + +**Итого: ~24 специализации в 9 отделах:** + +| Отдел | Роли | PM вызывает когда | +|-------|------|-------------------| +| Исследования | business_analyst, market_researcher, tech_researcher | Новый проект, новый модуль, выбор технологии | +| Дизайн | ux_designer, ui_designer | Новый модуль, редизайн, улучшение UX | +| Маркетинг | marketer, copywriter, seo_specialist | Запуск, лендинги, SEO, контент | +| Проектирование | architect, db_architect | Новый модуль, рефакторинг, масштабирование | +| Разработка | frontend_dev, backend_dev, fullstack_dev | Реализация | +| Качество | debugger, reviewer, tester, qa_analyst | Баги, ревью, тесты | +| Инфраструктура | sysadmin, devops, security | Деплой, CI/CD, безопасность | +| Юридическая | legal, legal_researcher | Коммерция, ПД, платежи, оферты, лицензии | +| Саппорт | support, support_guard | Обратная связь от клиентов | + +PM не вызывает всех — он собирает команду под задачу: +- Мелкий баг от клиента: support → PM → debugger → tester → frontend_dev +- Новый модуль с платежами: legal → marketer → ux → architect → security → devs → review +- Новый коммерческий проект: analyst → researchers → legal → ux → marketer → architect +- Внутренний инструмент: architect → fullstack_dev → tester (без юриста и маркетолога) + +**Разделение прав (КРИТИЧНО):** +- Исследователи/аналитики: WebSearch + Read + Write (документы). Не трогают код. +- Дизайнеры: Read + Write (спеки, wireframes). Не трогают код. +- Маркетологи: WebSearch + Read + Write. Не трогают код — дают требования. +- Архитектор: Read + Write (спеки). Не пишет код. +- Дебагер: Read + Bash. Ищет, НЕ правит. +- Ревьюер: ТОЛЬКО Read. Не трогает. 
+- Разработчики: полный доступ, но только к своему модулю. +- Сисадмин/DevOps: полный доступ к инфраструктуре. +- Безопасник: Read + Bash. Не правит — выдаёт требования. +- Саппорт: Read + Write (тикеты). Не трогает код, не принимает решений. +- Support Guard: ТОЛЬКО Read. Фильтр — пропускает/блокирует/эскалирует. + +### 3.5 Протокол обмена между агентами + +Агенты общаются ТОЛЬКО через структурированные артефакты в БД. +Никакого shared context. Каждый артефакт — JSON файл + запись в tasks. + +**Универсальный формат передачи:** +```json +{ + "task_id": "VDOL-042", + "from_role": "pm", + "to_role": "debugger", + "type": "debug_request", + "payload": { + "bug_description": "Фильтры поиска не применяются при быстром переключении", + "module": "search", + "affected_files": [ + "src/components/search/SearchFilters.vue", + "src/composables/useSearch.ts", + "src/api/search.ts" + ], + "known_context": [ + "Фильтры используют async API вызовы", + "Раньше был похожий баг с debounce (decision #15)" + ], + "reproduction_steps": "Быстро кликнуть 3 разных фильтра подряд" + } +} +``` + +**PM формирует payload, подтягивая из decisions:** +```python +# context_builder собирает для дебагера +context = { + "task": db.get_task("VDOL-042"), + "module_files": git.list_files("src/components/search/"), + "relevant_decisions": db.get_decisions( + project_id="vdol", + category="search", + types=["gotcha", "workaround"] + ), + "recent_bugs": db.get_tasks( + project_id="vdol", + module="search", + status="done", + type="debug", + limit=5 + ) +} +``` + +### 3.6 Коммуникация между рабочими агентами + +Рабочие агенты могут "общаться", но не напрямую — через PM как посредника, +или через артефакты в файловой системе: + +``` +[Дебагер] → пишет debug_report.json → [PM читает] + PM решает: "нужен фронтендер для фикса" +[PM] → формирует fix_request.json (debug_report + spec) → [Фронтендер] +[Фронтендер] → правит код → [Тестировщик] запускает тесты +[Тестировщик] → test_result.json → [PM] + PM решает: 
"тесты прошли, закрываю задачу" или "фейл, обратно фронтендеру" +``` + +**НО: для скорости можно разрешить прямую цепочку без PM:** + +``` +# PM заранее описывает pipeline +kin run VDOL-042 --pipeline "debugger → tester → frontend_dev → tester" +# Каждый агент передаёт результат следующему через файл +# PM получает только финальный результат + все промежуточные в логах +``` + +### 3.7 Механизм запуска (как это работает технически) + +```bash +# Сценарий 1: ты пишешь в Telegram +"продебажь фильтры в vdolipoperek" + +# Диспетчер (Python): +# 1. Парсит: проект=vdol, тип=debug, что=фильтры +# 2. Запускает PM проекта: + +claude -p "$(cat prompts/pm.md) + +ПРОЕКТ: vdolipoperek +TECH STACK: Vue 3, TypeScript, Nuxt +ТЕКУЩИЕ DECISIONS: +$(kin decisions vdol --category search --format brief) + +ЗАДАЧА: продебажь фильтры — не применяются при быстром переключении +ДОСТУПНЫЕ СПЕЦИАЛИСТЫ: debugger, frontend_dev, backend_dev, tester, reviewer, security +ШАБЛОНЫ МАРШРУТОВ: $(cat routes.yaml) + +Декомпозируй задачу и верни JSON с pipeline." 
\ + --session-id "pm-vdol-$(date +%s)" \ + --output-format json + +# PM возвращает: +{ + "task_id": "VDOL-043", + "pipeline": [ + {"role": "debugger", "module": "search", "brief": "..."}, + {"role": "tester", "depends_on": "debugger", "brief": "regression test"}, + {"role": "frontend_dev", "depends_on": "tester", "brief": "fix"}, + {"role": "tester", "depends_on": "frontend_dev", "brief": "verify fix"} + ], + "decisions_to_load": [15, 23] # PM знает какие decisions релевантны +} + +# Runner исполняет pipeline: +for step in pipeline: + context = context_builder.build(step.role, step.module, step.decisions) + result = claude_run(step.role, context, project_path) + save_result(step, result) + if not result.success: + escalate_to_pm(step, result) # PM решает что делать +``` + +```bash +# Сценарий 2: новый проект +"нужен агрегатор туров, чтобы парсить предложения и показывать клиентам" + +# Диспетчер определяет: тип=new_project +# Запускает Аналитика (БЕЗ PM, потому что проекта ещё нет): + +claude -p "$(cat prompts/analyst.md) +Исследуй тему: агрегатор туров для турагентства. +Контекст: существующий сайт vdolipoperek.com (Vue/Nuxt). +Нужно: конкуренты, доступные API поставщиков туров, ценообразование, +технические ограничения. Верни market_research.json" \ + --session-id "analyst-new-$(date +%s)" \ + --tools WebSearch,WebFetch,Write + +# Аналитик может спавнить исследователей: +# - "исследователь API" — ищет API TUI, Pegas, Anex... +# - "исследователь конкурентов" — анализирует level.travel, onlinetours... 
+ +# После: Архитектор получает research + хотелки: +claude -p "$(cat prompts/architect.md) +ИССЛЕДОВАНИЕ: $(cat market_research.json) +ХОТЕЛКИ: агрегатор туров, парсинг предложений, отображение клиентам +СУЩЕСТВУЮЩИЙ СТЕК: Vue 3, Nuxt, Node.js +Спроектируй project_blueprint.json" \ + --session-id "arch-new-$(date +%s)" + +# Blueprint → создаётся проект в БД → назначается PM → работа начинается +``` + +### 3.5 State Management + +**SQLite база — мультипроектная с рождения:** + +```sql +-- Проекты (центральный реестр) +CREATE TABLE projects ( + id TEXT PRIMARY KEY, -- 'vdol', 'sharedbox', 'neverdns', 'barsik', 'askai' + name TEXT NOT NULL, -- 'В долю поперёк', 'SharedBox', 'NeverDNS' + path TEXT NOT NULL, -- ~/projects/mailbox, ~/projects/vdolipoperek + tech_stack JSON, -- ["vue3", "typescript", "nuxt"] + status TEXT DEFAULT 'active', -- active, paused, maintenance, ready + priority INTEGER DEFAULT 5, -- 1=критический, 10=когда-нибудь + pm_prompt TEXT, -- путь к кастомному промпту PM для этого проекта + claude_md_path TEXT, -- путь к CLAUDE.md проекта + forgejo_repo TEXT, -- owner/repo для синхронизации issues + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Задачи (привязаны к проекту) +CREATE TABLE tasks ( + id TEXT PRIMARY KEY, -- VDOL-042, SB-015, NDNS-003 + project_id TEXT NOT NULL REFERENCES projects(id), + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', -- pending, decomposed, in_progress, review, done, blocked + priority INTEGER DEFAULT 5, + assigned_role TEXT, -- architect, developer, reviewer, tester, security + parent_task_id TEXT REFERENCES tasks(id), -- для подзадач + brief JSON, -- Task Brief от PM + spec JSON, -- Implementation Spec от архитектора + review JSON, -- Review Result + test_result JSON, -- Test Result + security_result JSON, -- Security Check Result + forgejo_issue_id INTEGER, -- связка с Forgejo issue + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Решения и грабли 
(КЛЮЧЕВАЯ ТАБЛИЦА — то что теряется при compaction) +-- Это ВНЕШНЯЯ ПАМЯТЬ PM-агента для каждого проекта +CREATE TABLE decisions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), -- может быть NULL для общепроектных решений + type TEXT NOT NULL, -- 'decision', 'gotcha', 'workaround', 'rejected_approach', 'convention' + category TEXT, -- 'architecture', 'ui', 'api', 'security', 'devops', 'performance' + title TEXT NOT NULL, + description TEXT NOT NULL, + tags JSON, -- ["ios-safari", "css", "bottom-sheet"] для поиска + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Логи агентов (для дебага, обучения и cost tracking) +CREATE TABLE agent_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), + agent_role TEXT NOT NULL, -- pm, analyst, architect, debugger, frontend_dev, etc. + session_id TEXT, -- claude --session-id + action TEXT NOT NULL, -- 'decompose', 'implement', 'review', 'test', 'fix', 'research' + input_summary TEXT, -- что получил (краткое описание, не полный текст) + output_summary TEXT, -- что выдал + tokens_used INTEGER, + model TEXT, -- haiku, sonnet, opus + cost_usd REAL, -- стоимость вызова + success BOOLEAN, + error_message TEXT, + duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Модули проекта (PM знает структуру) +-- Это "карта" проекта для PM: он знает какие модули есть и кого вызвать +CREATE TABLE modules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, -- 'search', 'auth', 'payments', 'ui-kit' + type TEXT NOT NULL, -- 'frontend', 'backend', 'shared', 'infra' + path TEXT NOT NULL, -- 'src/components/search/', 'src/api/search.ts' + description TEXT, -- 'Поиск и фильтрация туров' + owner_role TEXT, -- 'frontend_dev', 'backend_dev' — кого вызывать + dependencies JSON, -- ["auth", 
"api-client"] — зависимости между модулями + UNIQUE(project_id, name) +); + +-- Pipelines (история запусков — для обучения и повторного использования) +CREATE TABLE pipelines ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL REFERENCES tasks(id), + project_id TEXT NOT NULL REFERENCES projects(id), + route_type TEXT NOT NULL, -- 'debug', 'feature', 'refactor', 'hotfix', 'new_project' + steps JSON NOT NULL, -- pipeline JSON от PM + status TEXT DEFAULT 'running', -- running, completed, failed, cancelled + total_cost_usd REAL, + total_tokens INTEGER, + total_duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + completed_at DATETIME +); + +-- Кросс-проектные зависимости и связи +CREATE TABLE project_links ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + from_project TEXT NOT NULL REFERENCES projects(id), + to_project TEXT NOT NULL REFERENCES projects(id), + type TEXT NOT NULL, -- 'depends_on', 'shares_component', 'blocks' + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Индексы для быстрого доступа PM-агентов +CREATE INDEX idx_tasks_project_status ON tasks(project_id, status); +CREATE INDEX idx_decisions_project ON decisions(project_id); +CREATE INDEX idx_decisions_tags ON decisions(tags); -- для JSON-поиска по тегам +CREATE INDEX idx_agent_logs_project ON agent_logs(project_id, created_at); +CREATE INDEX idx_agent_logs_cost ON agent_logs(project_id, cost_usd); +``` + +### 3.6 Контекст-билдер (КЛЮЧЕВОЙ КОМПОНЕНТ) + +Перед запуском любого агента, система собирает контекст из БД: + +``` +kin run VDOL-042 + │ + ▼ +[context-builder] + │ + ├── Читает task VDOL-042 из tasks table → brief, spec + ├── Читает decisions WHERE project_id='vdol' → релевантные грабли + │ (фильтрует по category и tags, не грузит ВСЕ решения) + ├── Читает projects WHERE id='vdol' → tech_stack, claude_md_path + ├── Формирует МИНИМАЛЬНЫЙ контекст для конкретной роли: + │ + │ Для архитектора: brief + ALL decisions (он должен знать историю) + 
│ Для программиста: spec + decisions WHERE category IN ('gotcha','workaround') + │ Для ревьюера: spec + код + decisions WHERE type='convention' + │ Для тестировщика: spec + код (минимум) + │ Для безопасника: код + security conventions + │ + └── Запускает claude -p с собранным контекстом +``` + +**Это решает проблему "раздувшихся CLAUDE.md":** контекст собирается динамически и фильтруется по роли. + +### 3.7 Meta-PM: обзор всех проектов + +Meta-PM — самый "тупой" но самый полезный агент. Он работает с VIEW-запросами к БД: + +```sql +-- "Что горит?" — для Meta-PM +SELECT p.name, p.priority, p.status, + COUNT(CASE WHEN t.status = 'blocked' THEN 1 END) as blocked_tasks, + COUNT(CASE WHEN t.status = 'in_progress' THEN 1 END) as active_tasks, + COUNT(CASE WHEN t.status = 'pending' THEN 1 END) as pending_tasks, + MAX(t.updated_at) as last_activity +FROM projects p +LEFT JOIN tasks t ON t.project_id = p.id +WHERE p.status = 'active' +GROUP BY p.id +ORDER BY p.priority ASC, blocked_tasks DESC; + +-- "Сколько трачу?" — cost tracking +SELECT p.name, + SUM(al.cost_usd) as total_cost, + SUM(al.tokens_used) as total_tokens, + COUNT(*) as agent_calls +FROM agent_logs al +JOIN projects p ON p.id = al.project_id +WHERE al.created_at > datetime('now', '-7 days') +GROUP BY p.id +ORDER BY total_cost DESC; +``` + +### 3.8 Компоненты + +``` +kin/ +├── core/ +│ ├── db.py -- SQLite init, migrations +│ ├── models.py -- Projects, Tasks, Decisions, Modules, Pipelines +│ ├── context_builder.py -- формирование контекста ПО РОЛИ из БД +│ └── api.py -- REST API для GUI (FastAPI, читает ту же SQLite) +│ +├── agents/ +│ ├── prompts/ -- ~24 промпта (pm.md, architect.md, debugger.md...) +│ ├── routes.yaml -- шаблоны маршрутов (debug, feature, refactor...) 
+│ ├── specialists.yaml -- пул ролей с tools, model, context rules +│ └── runner.py -- запуск claude -p, pipeline executor +│ +├── cli/ +│ └── main.py +│ # kin status — все проекты одним взглядом +│ # kin run VDOL-043 — PM декомпозирует + pipeline +│ # kin run VDOL-043 --dry-run — показать pipeline без запуска +│ # kin ask "что горит?" — Intake отвечает +│ # kin support vdol "текст" — тикет от клиента +│ # kin cost --last 7d — расходы +│ # kin new-project "агрегатор" — analyst → architect → PM +│ +├── web/ -- GUI (Vue 3 + TypeScript — твой стек!) +│ ├── src/ +│ │ ├── views/ +│ │ │ ├── Dashboard.vue -- обзор всех проектов +│ │ │ ├── ProjectView.vue -- один проект: задачи, модули, decisions +│ │ │ ├── PipelineView.vue -- pipeline задачи: кто работает, где блокер +│ │ │ ├── CostView.vue -- расходы по проектам и задачам +│ │ │ └── SupportView.vue -- тикеты от клиентов +│ │ ├── components/ +│ │ │ ├── ProjectCard.vue -- карточка проекта со статусом +│ │ │ ├── PipelineGraph.vue -- визуализация pipeline (граф агентов) +│ │ │ ├── AgentStatus.vue -- статус агента (idle/working/done/error) +│ │ │ ├── DecisionsList.vue -- decisions проекта с поиском по тегам +│ │ │ └── LiveLog.vue -- real-time лог текущего pipeline +│ │ └── App.vue +│ └── package.json +│ +├── integrations/ +│ ├── telegram_bot.py -- бот-интерфейс (для тебя + клиентские боты) +│ └── forgejo_sync.py -- двусторонняя синхронизация issues ↔ tasks +│ +├── config/ +│ └── projects.yaml -- начальная конфигурация проектов +│ +└── kin.db -- SQLite база (единственный source of truth) +``` + +### 3.9 GUI: что нужно видеть + +**Dashboard (главный экран):** +``` +┌─────────────────────────────────────────────────────────────┐ +│ Kin Dashboard Cost: $47/week │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ 🔴 vdolipoperek 3 active 1 blocked $12/week │ +│ └─ VDOL-043: debug фильтров [████░░] debugger → tester │ +│ └─ VDOL-044: mobile bottom-sheet [██████] done ✓ │ +│ └─ VDOL-045: оплата [░░░░░░] 
blocked: ждёт юриста │ +│ │ +│ 🟡 sharedbox 1 active $8/week │ +│ └─ SB-016: multi-tenant isolation [██░░░░] architect │ +│ │ +│ 🟢 neverdns 0 active $0/week │ +│ └─ маркетинг-фаза, ждёт контент │ +│ │ +│ 🟢 barsik 1 active $5/week │ +│ └─ BARS-007: RAG pipeline [████░░] backend_dev │ +│ │ +│ ⚪ askai 0 active $0/week │ +│ ⚪ ddfo 0 active $0/week │ +│ ⚪ stopleak 0 active $0/week │ +│ │ +│ ─── Support ─── │ +│ 2 новых тикета (vdolipoperek) │ +│ 1 ожидает твоего approve │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Pipeline View (конкретная задача):** +``` +┌─────────────────────────────────────────────────────────────┐ +│ VDOL-043: Debug фильтров поиска Status: in_progress│ +│ Priority: high Cost: $1.82 Duration: 12 min │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ [PM] ──────► [Debugger] ──────► [Tester] ──────► [Frontend]│ +│ ✓ 0.3s ✓ $0.45 ● working ○ pending│ +│ decomposed found: writing │ +│ pipeline race condition regression │ +│ in useSearch.ts test │ +│ │ +│ Decisions добавлены: │ +│ #47: "race condition в async фильтре — AbortController" │ +│ │ +│ ─── Live Log ─── │ +│ 12:04:32 [tester] Запущен: session tst-VDOL043-1710... │ +│ 12:04:33 [tester] Читает: src/composables/useSearch.ts │ +│ 12:04:45 [tester] Пишет: tests/search.filter.spec.ts │ +│ 12:05:01 [tester] Bash: npm run test -- search.filter │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Почему Vue 3:** это твой стек, ты на нём строишь vdolipoperek. +GUI Kin — это тоже проект, который Kin может помогать разрабатывать. +Meta-moment: Kin строит свой собственный GUI. 
+ +**Архитектура GUI:** +``` +[kin.db] ← SQLite (source of truth) + │ + ├── [core/api.py] ← FastAPI, REST endpoints + │ GET /projects — список проектов со статусами + │ GET /projects/{id} — детали проекта + задачи + modules + │ GET /tasks/{id} — задача + pipeline + agent logs + │ GET /tasks/{id}/live — SSE stream для live log + │ GET /pipelines/{id} — граф pipeline с статусами + │ GET /decisions?project=X — decisions с фильтрами + │ GET /support/tickets — тикеты от клиентов + │ GET /cost?period=7d — расходы + │ POST /tasks — создать задачу + │ POST /tasks/{id}/run — запустить pipeline + │ POST /support/approve/{id} — одобрить ответ клиенту + │ + └── [web/] ← Vue 3 + TypeScript, Vite + Подключается к API + SSE для live обновлений pipeline + Responsive (работает с MacBook и с телефона) +``` + +**Ключевое:** GUI читает ту же SQLite что и CLI/runner. +Нет отдельной базы для GUI, нет sync проблем. +runner.py пишет в kin.db → API читает → Vue показывает. +Real-time через SSE (Server-Sent Events) — runner пишет лог → API стримит → Vue обновляет. + +### 3.10 Интеграция с существующей инфраструктурой + +- **Forgejo**: Двусторонний sync — issue создано в Forgejo → task в kin, task завершён → issue закрыт. Forgejo остаётся UI для ручного просмотра. +- **Obsidian**: Decisions из БД экспортируются как .md в vault. Kanban-доска читает задачи. Направление: kin → Obsidian (read-only зеркало). +- **Telegram бот**: Основной мобильный интерфейс. Свободная форма: "продебажь фильтры в vdolipoperek" → dispatcher парсит → PM → pipeline. +- **Mac Mini M4 Pro**: Основной хост. Агенты запускаются как процессы на нём. +- **MacBook**: Через SSH + Telegram бот. Или Syncthing синхронизирует kin.db (receive-only на MacBook). +- **CLAUDE.md per project**: Минимальный (30 строк), содержит ТОЛЬКО: "tech stack", "coding conventions", "ссылка на kin для контекста". Decisions НЕ дублируются. 
+
+### 3.11 Ключевые отличия от Ruflo
+
+| Аспект | Ruflo | Kin |
+|--------|-------|---------|
+| Мультипроектность | Нет | Intake + Project PMs |
+| Полнота команды | Только dev | ~22 роли: research, design, marketing, dev, QA, ops, support |
+| Маршрутизация | Фиксированная | PM динамически собирает команду |
+| Изоляция | Один промпт | Каждый агент = отдельный процесс |
+| Обратная связь | Нет | Support → Guard → PM → команда → ответ клиенту |
+| Клиентские боты | Нет | Telegram per project (перспектива) |
+
+---
+
+## ЧАСТЬ 4: План действий
+
+### Фаза 1: Фундамент + один проект (2-3 дня)
+- [ ] SQLite схема (все таблицы включая support)
+- [ ] context-builder, runner.py, pipeline executor
+- [ ] Intake-агент, PM, routes.yaml, specialists.yaml
+- [ ] Базовые промпты: architect, frontend_dev, debugger, tester, reviewer
+- [ ] CLI + тест на vdolipoperek.com
+
+### Фаза 2: Полная команда ~22 роли (2-3 дня)
+- [ ] Все промпты, разделение прав
+- [ ] Тест полной цепочки marketer → ux → architect → dev → review
+
+### Фаза 3: Все 10 проектов (1-2 дня)
+### Фаза 4: Telegram интеграция (1-2 дня)
+### Фаза 5: Саппорт + Support Guard (1-2 дня)
+### Фаза 6: Forgejo + Obsidian sync (1 день)
+### Фаза 7: Боевой прогон на vdolipoperek (1-2 недели)
+### Фаза 8: Клиентские Telegram-боты (перспектива)
+### Фаза 9: Самоподдерживающиеся проекты (далёкая перспектива)
+
+---
+
+## Заметки
+
+**Архитектура:** Изоляция контекста через процессы. Decisions = внешняя память PM. PM тупой/памятливый, workers умные/забывчивые. context-builder фильтрует по роли. ~22 роли = полная софтверная компания.
+
+**Бизнес:** Полнота > скорость. Продукт = коммерческий, не поделка. Саппорт замыкает цикл. Support Guard критичен. В перспективе проекты живут сами.
+
+**Техника:** Python, SQLite (source of truth), cost tracking встроен, Forgejo sync, Obsidian read-only.
+
+**Из Ruflo взять:** MCP, SQLite memory, model routing, ADR, hooks. **НЕ брать:** fake parallelism, over-engineering.
diff --git a/agent-orchestrator-research.md b/agent-orchestrator-research.md new file mode 100644 index 0000000..8d7aff2 --- /dev/null +++ b/agent-orchestrator-research.md @@ -0,0 +1,1291 @@ +# Исследование мультиагентных оркестраторов и проект собственного + +## Дата: 15 марта 2026 +## Проект: Kin — виртуальная софтверная компания + +--- + +## ЧАСТЬ 1: Анализ Ruflo (ex Claude Flow) + +### 1.1 Общие сведения + +- **Репо**: github.com/ruvnet/ruflo +- **Автор**: ruvnet (один разработчик) +- **Звёзды**: ~20K, форки: ~2.3K +- **Коммиты**: 5,800+, 55 alpha-итераций +- **Текущая версия**: v3.5.0 (февраль 2026) — первый "стабильный" релиз +- **Стек**: TypeScript/Node.js, WASM (Rust) для policy engine и embeddings +- **Пакеты**: @claude-flow/cli, claude-flow, ruflo — все три являются обёртками над @claude-flow/cli + +### 1.2 Архитектура (заявленная) + +``` +User → Ruflo CLI/MCP → Router → Swarm → Agents → Memory → LLM Providers + ↑ ↓ + └──────────────── Learning Loop ←────────────────────────┘ +``` + +**Ключевые компоненты:** +- **CLI/MCP интерфейс** — входная точка, 175+ MCP-тулов +- **Router** — маршрутизация задач к агентам, автоматический выбор модели (haiku/sonnet/opus) +- **Swarm Manager** — управление топологией (hierarchical, mesh, ring, star) +- **Agent System** — 60+ предопределённых агентов в 16 категориях +- **Memory** — SQLite (.swarm/memory.db), ReasoningBank с hash-based embeddings +- **Hive Mind** — queen/worker иерархия с "consensus" протоколами + +### 1.3 Что РЕАЛЬНО работает (по issue tracker и коду) + +**Механизм запуска агентов:** +- Ключевая функция: `launchClaudeCodeWithSwarm` в `src/cli/commands/swarm-new.ts` +- ЧТО ОНА ДЕЛАЕТ: формирует гигантский `swarmPrompt` (текстовую строку) и передаёт его в `claude` CLI +- ПО СУТИ: это prompt engineering — агенты "существуют" как инструкции в промпте одного Claude Code +- Файлы генерируются прямо в корень проекта (issue #398 — нет контроля output directory) + +**Критический баг (issue #955):** +- `--claude` 
флаг в `hive-mind spawn` ДОКУМЕНТИРОВАН, но НЕ РЕАЛИЗОВАН +- Команда спавнит "worker agent" (запись в БД), но НЕ запускает Claude Code +- Флаг молча игнорируется + +**Memory система:** +- SQLite-based, работает +- ReasoningBank использует hash-based embeddings (не требует API) — быстрые, но примитивные +- 2-3ms latency для поиска — это хорошо +- Persistent между сессиями + +**GUI:** +- Web UI на порту 3000 (WebSocket) +- Терминальный эмулятор в браузере +- @liamhelmer/claude-flow-ui — отдельный npm-пакет для UI + +### 1.4 Что МАРКЕТИНГ vs РЕАЛЬНОСТЬ + +| Заявлено | Реальность | +|----------|-----------| +| 60+ специализированных агентов | Папки с CLAUDE.md промптами в .agents/skills/, по сути — шаблоны для system prompt | +| Byzantine consensus | Протокол описан, но по issue tracker - не используется в реальных сценариях | +| Neural pattern recognition | Hash-based embeddings + паттерн-матчинг, не нейросеть | +| 127 параллельных агентов (из issue #125) | Это wishlist/epic, не реализация. Реально — один claude процесс с большим промптом | +| Self-learning | Запись success/failure в SQLite + маршрутизация на основе прошлых результатов. Работает, но это не ML | +| WASM SIMD acceleration | Rust-based WASM для embeddings — реально работает, даёт скорость | +| Hive Mind | Queen = координирующий промпт, Workers = записи в БД, НЕ отдельные Claude процессы | + +### 1.5 Что ВЗЯТЬ из Ruflo + +**Сильные стороны (стоит перенять):** + +1. **MCP-интеграция** — агент-оркестратор как MCP-сервер для Claude Code. Это позволяет Claude Code вызывать оркестратор как тулзу, а не наоборот. Элегантно. + +2. **SQLite memory с namespace** — простая, надёжная, persistent. Key-value с namespace (architecture/, bugs/, decisions/) — хороший паттерн. + +3. **Model routing** — автоматический выбор haiku для простых задач, opus для сложных. Экономит деньги. + +4. **Anti-drift defaults** — `topology: hierarchical` + `maxAgents: 8` + `strategy: specialized`. 
Маленькие команды с чёткими ролями дрифтят меньше. + +5. **ADR (Architecture Decision Records)** — spec-first подход, где архитектура описана в ADR-файлах и агенты обязаны следовать. + +6. **Hook system** — pre-task/post-task хуки для автоматизации (pre: загрузить контекст, post: сохранить результат). + +7. **Структура .agents/skills/** — каждый агент = директория с CLAUDE.md (system prompt) + tools + config. Модульно, расширяемо. + +**Слабые стороны (НЕ повторять):** + +1. **Fake parallelism** — агенты не являются отдельными процессами. Это один Claude Code с большим промптом, притворяющимся несколькими агентами. ГЛАВНАЯ ПРОБЛЕМА — контекст всё равно один, compaction убивает всех одновременно. + +2. **Over-engineering** — 215 MCP tools, 87 в wiki, byzantine consensus — для одного разработчика это красные флаги. Много surface area, мало глубины. + +3. **Размытие ответственности пакетов** — 3 npm-пакета (@claude-flow/cli, claude-flow, ruflo) — это один и тот же код. Ребрендинг создаёт путаницу. + +4. **Нет реальной изоляции контекста** — это ключевое. Если агент-программист и агент-тестировщик живут в одном контексте, compaction убивает нюансы обоих. + +5. **Документация как продукт** — README и Wiki описывают фичи, которые не реализованы (issue #955). Доверять нельзя. 
+ +--- + +## ЧАСТЬ 2: Анализ других фреймворков (ключевые выводы) + +### 2.1 CrewAI (Python) + +- **Модель**: Role-based crews + Event-driven Flows +- **Сильное**: Быстрое прототипирование, MCP + A2A поддержка, 44K+ GitHub stars +- **Слабое**: Ограниченный checkpointing, роли через prompt engineering (не настоящая изоляция) +- **Для нас**: Flows (event-driven оркестрация поверх crews) — хорошая концепция + +### 2.2 LangGraph (Python) + +- **Модель**: Directed graph с conditional edges, checkpointing +- **Сильное**: Лучшая в индустрии persistence/state, time-travel debug +- **Слабое**: Высокий порог входа, привязка к LangChain экосистеме +- **Для нас**: Концепция checkpointing + graph-based flow + +### 2.3 Claude Agent SDK + +- **Модель**: Tool-use chain с sub-agents +- **Сильное**: Нативная интеграция с Claude, safety-first +- **Слабое**: Только Claude модели, лёгкий на оркестрацию +- **Для нас**: Lifecycle hooks + +### 2.4 Нативные субагенты Claude Code + +- **Как работают**: `claude -p "task" --session-id "id"` запускает ОТДЕЛЬНЫЙ процесс +- **КЛЮЧЕВОЕ**: Это РЕАЛЬНАЯ изоляция контекста! Каждый субагент — отдельное контекстное окно +- **Ограничение**: Нет координации "из коробки", нет shared memory, нет PM-слоя + +--- + +## ЧАСТЬ 3: Архитектура Kin (наш проект) + +### 3.1 Ключевой принцип + +> **Каждый агент = отдельный Claude Code процесс с изолированным контекстом.** +> Compaction в рамках одного агента не убивает нюансы, потому что его контекст маленький и специализированный. +> PM-агент держит мета-уровень, а не весь код. + +### 3.2 Живая иерархия с динамической маршрутизацией + +Не pipeline (A→B→C→D), а **живая организация**: каждый уровень понимает +свою зону ответственности и собирает нужную команду под задачу. + +``` +[Ты (Михаил)] ← человеческая речь, свободная форма: + │ "клиент пишет что фильтры глючат на айфоне когда быстро тыкаешь" + │ "нужен агрегатор туров, чтобы парсить предложения" + │ "что у меня горит?" 
+ │ "посмотри что там с NeverDNS, давно не трогали" + ▼ +[Intake-менеджер] — УМНЫЙ агент (Sonnet), НЕ код на Python. + │ Почему агент, а не код: + │ - Клиент пишет "фильтры глючат на айфоне когда быстро тыкаешь" + │ → код не поймёт нюанс "быстро тыкаешь" = race condition + │ → агент переформулирует для команды + │ - Ты пишешь "посмотри что там с NeverDNS" + │ → это не задача, это запрос на статус + возможно ревью + │ → агент разберётся + │ + │ Что делает: + │ 1. Понимает контекст (ты — директор и продажник, клиенты через тебя) + │ 2. Определяет проект, тип задачи, срочность + │ 3. Задаёт уточняющие вопросы если надо + │ 4. Формулирует задачу на языке команды + │ 5. Для простых запросов (статус) — SQL к БД, без агентов + │ 6. Для задач — маршрутизирует к нужному PM проекта + │ 7. Для новых проектов — запускает цепочку research → design → architecture + │ + │ Его контекст: список проектов + статусы (из БД, маленький) + │ НЕ знает: код, архитектуру, детали — только "кто чем занимается" + │ + ├── [PM:vdolipoperek] ── знает проект ГЛУБОКО + │ │ Что знает: модули, tech stack, decisions, грабли, текущий статус + │ │ Что умеет: декомпозировать задачу, выбрать нужных специалистов + │ │ Его контекст: decisions + modules + текущие tasks (из БД) + │ │ + │ │ Intake передаёт: "Баг: фильтры поиска не применяются при + │ │ быстром переключении на iOS Safari. + │ │ Источник: жалоба клиента. Приоритет: высокий." + │ │ + │ │ PM думает: "фильтры — это модуль search. iOS Safari — + │ │ у нас уже была decision #15 про position:fixed. + │ │ Нужен дебагер на модуль search." + │ │ + │ │ ┌─── [Дебагер] ← описание бага + код модуля + decision #15 + │ │ │ │ ищет проблему + │ │ │ │ нашёл: "race condition в async фильтре" + │ │ │ ▼ + │ │ │ [Тестировщик] ← найденный баг + модуль + │ │ │ │ regression test, подтверждает баг + │ │ │ ▼ + │ │ │ [Фронтендер] ← баг + тест + spec модуля + │ │ │ │ фиксит, тест проходит + │ │ │ ▼ + │ │ └─── [PM] ← результат. 
Записывает decision: + │ │ "race condition в SearchFilters — debounce + AbortController" + │ │ → Intake сообщает тебе → ты сообщаешь клиенту + │ │ + │ │ Другой пример: "добавить оплату на сайт" + │ │ PM: "новый модуль, нужна полная команда" + │ │ + │ │ ┌─── [Маркетолог] ← "платежи на сайте турагентства" + │ │ │ │ исследует: как конкуренты делают checkout, + │ │ │ │ какие conversion-паттерны, trust signals + │ │ │ ▼ + │ │ │ [UX-дизайнер] ← research маркетолога + brief + │ │ │ │ проектирует: user flow оплаты, wireframes + │ │ │ ▼ + │ │ │ [Архитектор] ← UX flow + brief + все decisions проекта + │ │ │ │ spec: модули, API, БД, интеграция с платёжкой + │ │ │ ▼ + │ │ │ [Безопасник] ← spec (PCI DSS для платежей!) + │ │ │ │ security requirements + │ │ │ ▼ + │ │ │ [Бэкендер] ← spec + security reqs (параллельно!) + │ │ │ [Фронтендер] ← spec + UX wireframes + │ │ │ ▼ + │ │ │ [Ревьюер] + [Тестировщик] + [Безопасник] + │ │ └─── [PM] ← всё готово, decisions обновлены + │ │ + ├── [PM:sharedbox] ── знает свой проект так же глубоко + │ └── (своя динамическая команда) + │ + ├── [PM:neverdns] ── знает: готов, в маркетинг-фазе + │ └── (маркетолог, копирайтер, SEO — другая команда!) + │ + └── ... 
(остальные проекты) + + +[Для НОВЫХ проектов — отдельная цепочка:] + + Intake: "нужен агрегатор туров" + │ + ├── [Бизнес-аналитик] ← хотелки + контекст (турагентство) + │ │ исследует: бизнес-модель, монетизация, целевая аудитория + │ │ может спавнить: + │ │ [Исследователь рынка] ← конкуренты, ниша + │ │ [Исследователь API] ← какие API поставщиков туров есть + │ │ [Исследователь юридики] ← лицензии, договора + │ ▼ + ├── [UX-дизайнер] ← research + хотелки + │ │ user journey, wireframes ключевых страниц + │ │ смотрит конкурентов, лучшие практики + │ ▼ + ├── [Маркетолог] ← research + UX + │ │ стратегия продвижения, SEO, механики удержания + │ │ что учесть при разработке для маркетинга + │ ▼ + ├── [Архитектор] ← research + UX + marketing reqs + │ │ project_blueprint: модули, tech stack, план + │ │ учитывает существующий стек (Vue/Nuxt) + │ ▼ + └── Создаётся проект в БД → назначается PM → работа начинается +``` + +### 3.3 Типы задач и маршруты (PM выбирает динамически) + +PM проекта — это не тупой маршрутизатор, это агент, который ПОНИМАЕТ задачу. 
+Но чтобы понимал хорошо, ему нужны "шаблоны маршрутов" как подсказки: + +```yaml +# В промпте PM: "ты знаешь эти типы задач и кого вызывать" + +routes: + debug: + description: "Найти и исправить баг" + typical_flow: + - debugger: "найди причину, опиши" + - tester: "напиши regression test, подтверди баг" + - developer: "исправь, тест должен пройти" + pm_decides: + - какой модуль затронут (из знания проекта) + - frontend или backend баг + - нужен ли security review (если баг в auth/payments) + + feature: + description: "Новая фича" + typical_flow: + - architect: "спроектируй" + - developer: "реализуй" (может быть несколько параллельно) + - reviewer: "проверь" + - tester: "протестируй" + pm_decides: + - масштаб (один компонент или новый модуль) + - нужен ли architect (мелкая фича → сразу developer) + - параллелить ли frontend/backend + + refactor: + description: "Рефакторинг существующего кода" + typical_flow: + - architect: "оцени scope, предложи план" + - developer: "рефактори по плану" + - tester: "прогони существующие тесты" + pm_decides: + - затрагивает ли другие модули + - нужна ли миграция данных + + security_audit: + description: "Проверка безопасности" + typical_flow: + - security: "проверь по OWASP" + - developer: "исправь найденное" + - security: "подтверди исправления" + + new_project: + description: "Создание нового проекта с нуля" + typical_flow: + - analyst: "исследуй рынок, конкурентов, API" + - architect: "спроектируй на основе исследования" + - pm: "декомпозируй blueprint на задачи" + - # далее — обычные feature/debug задачи + + hotfix: + description: "Срочное исправление в продакшене" + typical_flow: + - debugger: "найди причину" + - developer: "минимальный fix" + - tester: "smoke test" + constraints: + - максимум 1 час + - минимум изменений + - deploy сразу +``` + +### 3.4 Пул специалистов (агенты-рабочие) + +Рабочие агенты — НЕ фиксированный набор. Это **пул ролей**, из которых PM +собирает команду под задачу. 
Каждый — отдельный Claude Code процесс. + +```yaml +specialists: + + # ═══════════════════════════════════════════════ + # ИССЛЕДОВАНИЯ И АНАЛИТИКА + # ═══════════════════════════════════════════════ + + business_analyst: + prompt: prompts/business_analyst.md + context: "задание + бизнес-контекст проекта" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # стратегические решения + description: > + Бизнес-аналитик. Исследует бизнес-модель, монетизацию, целевую + аудиторию, юридические аспекты. Может спавнить исследователей. + + market_researcher: + prompt: prompts/market_researcher.md + context: "тема исследования + рамки" + tools: [WebSearch, WebFetch, Write] + model: sonnet + description: > + Исследователь рынка. Конкуренты, ниша, тренды, ценообразование. + Подчинённый аналитика — копает конкретную тему. + + tech_researcher: + prompt: prompts/tech_researcher.md + context: "что исследовать + ограничения" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Технический исследователь. API поставщиков, библиотеки, + интеграции, бенчмарки. Знает где искать доки, changelog, issues. + + # ═══════════════════════════════════════════════ + # ДИЗАЙН И UX + # ═══════════════════════════════════════════════ + + ux_designer: + prompt: prompts/ux_designer.md + context: "brief + research + примеры конкурентов" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # UX-решения критичны для продукта + description: > + UX-дизайнер. User journey, wireframes (текстовые/Mermaid), + information architecture, interaction patterns. + Смотрит на конкурентов, лучшие практики, accessibility. + + ui_designer: + prompt: prompts/ui_designer.md + context: "wireframes + style guide проекта" + tools: [Read, Write] + model: sonnet + description: > + UI-дизайнер. Визуальный дизайн, компонентная система, + типографика, цвета, spacing. Описывает на уровне CSS tokens. 
+ + # ═══════════════════════════════════════════════ + # МАРКЕТИНГ И КОНТЕНТ + # ═══════════════════════════════════════════════ + + marketer: + prompt: prompts/marketer.md + context: "research + продукт + целевая аудитория" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Маркетолог. Стратегия продвижения, SEO-требования для разработки, + conversion-паттерны, A/B тест гипотезы, trust signals. + Знает исследования по поведению пользователей. + Даёт требования разработчикам: что учесть в коде для маркетинга. + + copywriter: + prompt: prompts/copywriter.md + context: "brief + tone of voice + целевая аудитория" + tools: [Read, Write] + model: sonnet + description: > + Копирайтер. Тексты для UI (кнопки, заголовки, ошибки), + лендинги, описания, meta-теги. Знает русский и английский. + + seo_specialist: + prompt: prompts/seo_specialist.md + context: "сайт + ниша + текущие метрики (если есть)" + tools: [WebSearch, WebFetch, Read, Write, Bash] + model: sonnet + description: > + SEO-специалист. Техническое SEO, структура URL, meta-теги, + schema.org разметка, Core Web Vitals, sitemap. + Даёт конкретные требования фронтендеру и бэкендеру. + + # ═══════════════════════════════════════════════ + # ПРОЕКТИРОВАНИЕ + # ═══════════════════════════════════════════════ + + architect: + prompt: prompts/architect.md + context: "brief + ВСЕ decisions проекта + tech_stack + research (если есть)" + tools: [Read, Write] + model: opus # критические решения — максимум мозгов + description: > + Системный архитектор. Проектирует архитектуру, модули, API, + схему БД, интеграции. Выдаёт implementation spec. + Не пишет код — пишет спецификации. + + db_architect: + prompt: prompts/db_architect.md + context: "spec + текущая схема БД" + tools: [Read, Write] + model: opus + description: > + Архитектор БД. Схема, миграции, индексы, нормализация. + Когда SQLite хватит, когда переходить на PostgreSQL. 
+ + # ═══════════════════════════════════════════════ + # РАЗРАБОТКА + # ═══════════════════════════════════════════════ + + frontend_dev: + prompt: prompts/frontend_dev.md + context: "spec модуля + wireframes + relevant decisions (gotchas)" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Фронтендер. Vue/Nuxt/React, CSS, анимации, responsive. + Работает в директории проекта. + + backend_dev: + prompt: prompts/backend_dev.md + context: "spec модуля + API contracts + relevant decisions" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Бэкендер. Node.js/Python, API, интеграции, бизнес-логика. + + fullstack_dev: + prompt: prompts/fullstack_dev.md + context: "spec модуля + relevant decisions" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Фулстекер. Для мелких задач, где нет смысла делить. + + # ═══════════════════════════════════════════════ + # КАЧЕСТВО + # ═══════════════════════════════════════════════ + + debugger: + prompt: prompts/debugger.md + context: "описание бага + код модуля + логи (если есть)" + tools: [Read, Bash, Write] # НЕТ Edit! Дебагер ищет, не чинит. + model: opus # дебаг требует глубокого reasoning + description: > + Дебагер. Ищет причину бага, описывает root cause, НЕ исправляет. + Предлагает решение, но не трогает код. + + reviewer: + prompt: prompts/reviewer.md + context: "код + spec + conventions проекта" + tools: [Read] # ТОЛЬКО чтение! Ревьюер не правит. + model: sonnet + description: > + Ревьюер. Code review: соответствие spec, качество, паттерны, + naming, edge cases. Только читает, не правит. + + tester: + prompt: prompts/tester.md + context: "код + spec" + tools: [Read, Write, Edit, Bash] + model: sonnet + working_dir: "{project_path}" + description: > + Тестировщик. Unit, integration, e2e тесты. Гоняет, ищет edge cases. 
+ + qa_analyst: + prompt: prompts/qa_analyst.md + context: "spec + UX flow + текущие тесты" + tools: [Read, Write] + model: sonnet + description: > + QA-аналитик. Тест-планы, тест-кейсы, acceptance criteria. + Не пишет код тестов — описывает ЧТО тестировать. + + # ═══════════════════════════════════════════════ + # ИНФРАСТРУКТУРА И БЕЗОПАСНОСТЬ + # ═══════════════════════════════════════════════ + + sysadmin: + prompt: prompts/sysadmin.md + context: "инфраструктура проекта + текущий стек" + tools: [Read, Write, Edit, Bash] + model: sonnet + description: > + Сисадмин. Docker, nginx, CI/CD, мониторинг, бэкапы. + Знает когда SQLite хватит и когда нужен PostgreSQL. + Настраивает фаерволы, SSL, деплой. Ставит пакеты/модули. + + devops: + prompt: prompts/devops.md + context: "инфраструктура + pipeline + tech stack" + tools: [Read, Write, Edit, Bash] + model: sonnet + description: > + DevOps. CI/CD pipeline, автодеплой, blue-green, rollback. + Docker Compose, GitHub Actions / Forgejo CI. + + security: + prompt: prompts/security.md + context: "код + security-relevant decisions" + tools: [Read, Bash] + model: opus # безопасность — не экономим + description: > + Безопасник. OWASP, CVE, auth, injection, secrets, dependencies. + Проверяет фаерволы, ставит ограничения. Знает актуальные уязвимости. + + # ═══════════════════════════════════════════════ + # ЮРИДИЧЕСКАЯ ПОДДЕРЖКА + # ═══════════════════════════════════════════════ + + legal: + prompt: prompts/legal.md + context: "описание задачи/модуля + юрисдикция + тип бизнеса" + tools: [WebSearch, WebFetch, Read, Write] + model: opus # юридические решения критичны + description: > + Юрист. Анализирует задачу с точки зрения законности: + - Можно ли так делать? (ЗоЗПП, 152-ФЗ, 54-ФЗ, GDPR...) + - Что нужно сделать чтобы было можно? (оферта, согласие, лицензия) + - Какие документы нужны? (политика конфиденциальности, договор) + - Какие риски? 
(штрафы, блокировки, претензии) + НЕ заменяет настоящего юриста — даёт направление и чеклист. + PM вызывает когда: коммерция, персональные данные, платежи, + пользовательский контент, трансграничные операции. + PM НЕ вызывает когда: внутренний инструмент без юрлица. + + legal_researcher: + prompt: prompts/legal_researcher.md + context: "юридический вопрос + юрисдикция" + tools: [WebSearch, WebFetch, Read, Write] + model: sonnet + description: > + Юридический исследователь. Ищет актуальные нормативные акты, + судебную практику, разъяснения регуляторов. + Подчинённый юриста — копает конкретный вопрос. + + # ═══════════════════════════════════════════════ + # САППОРТ И ОБРАТНАЯ СВЯЗЬ + # ═══════════════════════════════════════════════ + + support: + prompt: prompts/support.md + context: "описание продукта + FAQ + known issues + decisions (gotchas)" + tools: [Read, Write] + model: sonnet + description: > + Саппорт-агент. Общается с пользователем (через тебя или напрямую). + Задаёт правильные вопросы, собирает анамнез: + - Что именно не работает? На каком устройстве/браузере? + - Воспроизводится ли стабильно? Когда началось? + - Скриншот/видео? + Формирует структурированный тикет для PM. + НЕ обещает сроки, НЕ принимает решения, НЕ выполняет просьбы. + + support_guard: + prompt: prompts/support_guard.md + context: "бизнес-правила проекта + security policies" + tools: [Read] + model: sonnet + description: > + Фильтр саппорта (безопасник обратной связи). 
+ Проверяет ВСЕ входящие от клиентов перед тем как они попадут в систему: + - "Дайте мне данные других пользователей" → REJECT + лог + - "Сделайте скидку 90%" → REJECT (не в компетенции системы) + - "Удалите мой аккаунт" → ESCALATE to human (Михаил решает) + - "Кнопка не работает" → PASS to support → PM + Классифицирует: bug / feature_request / question / abuse / escalate +``` + +### 3.4a Саппорт: от ручного к автоматическому (эволюция) + +``` +=== ФАЗА 1: Саппорт через тебя (сейчас) === + +[Клиент] → пишет тебе в WhatsApp/Telegram + │ + ▼ +[Ты] → пересказываешь Intake-менеджеру: + │ "клиент пишет что фильтры глючат на айфоне" + ▼ +[Intake] → формулирует → [PM проекта] → команда работает + │ + ▼ +[PM] → результат → [Intake] → тебе → ты отвечаешь клиенту + + +=== ФАЗА 2: Саппорт-агент общается с тобой (скоро) === + +[Клиент] → пишет тебе + │ + ▼ +[Ты] → копируешь сообщение клиента в kin: + │ kin support vdol "текст клиента" + ▼ +[Support] → задаёт тебе уточняющие вопросы: + │ "Спросите клиента: на каком устройстве? В каком браузере? + │ Воспроизводится ли если обновить страницу?" + ▼ +[Ты] → спрашиваешь клиента → добавляешь ответы + ▼ +[Support] → формирует тикет → [Support Guard проверяет] → [PM] + │ + ▼ +[PM] → результат → [Support] формулирует ответ клиенту + → "Мы нашли и исправили проблему с фильтрами. + Обновите страницу — должно работать." 
+ ▼ +[Ты] → отправляешь клиенту (можешь отредактировать) + + +=== ФАЗА 3: Telegram-бот для клиентов (перспектива) === + +[Клиент] → пишет в Telegram-бот проекта напрямую + │ + ▼ +[Support Guard] → фильтрует: + │ abuse/manipulation → BLOCK + лог для тебя + │ escalation → NOTIFY тебя + │ нормальный запрос → PASS + ▼ +[Support Bot] → общается с клиентом: + │ задаёт вопросы, собирает анамнез, показывает FAQ + │ если FAQ решает проблему → закрывает + │ если нет → формирует тикет + ▼ +[PM] → принимает тикет, запускает команду + ▼ +[PM] → результат → [Support Bot] → отвечает клиенту + │ + │ ВСЕ ответы клиентам проходят через Support Guard: + │ - не раскрывает внутреннюю архитектуру + │ - не обещает невозможное + │ - не подтверждает уязвимости + │ - вежливо, в стиле бренда + ▼ +[Ты] → получаешь summary: "Клиент X обратился с багом Y, + команда исправила, клиент получил ответ Z" + (можешь вмешаться в любой момент) + + +=== ФАЗА 4: Проект живёт сам (далёкая перспектива) === + +[Клиенты] → боты → [Support] → [PM] → [Команда] → [Deploy] + │ │ + ▼ ▼ +[Аналитика использования] [Автодеплой фиксов] + │ + ▼ +[Маркетолог] → "конверсия упала на 5% на странице X" + → [PM] → [UX-дизайнер] → [Фронтендер] → [A/B тест] + +[Ты] = стратегическое управление + финальное approve на крупные изменения +``` + +**Таблицы для саппорта (добавить в БД):** + +```sql +-- Тикеты от пользователей +CREATE TABLE support_tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + source TEXT NOT NULL, -- 'manual', 'telegram_bot', 'email' + client_id TEXT, -- идентификатор клиента (telegram id, email...) 
+ client_message TEXT NOT NULL, -- исходное сообщение клиента + classification TEXT, -- 'bug', 'feature_request', 'question', 'abuse', 'escalate' + guard_result TEXT, -- 'pass', 'reject', 'escalate' + guard_reason TEXT, -- почему отклонено/эскалировано + anamnesis JSON, -- собранная информация (устройство, шаги, скриншоты) + task_id TEXT REFERENCES tasks(id), -- связанная задача (если создана) + response TEXT, -- ответ клиенту + response_approved BOOLEAN DEFAULT FALSE, -- ты одобрил ответ? + status TEXT DEFAULT 'new', -- new, collecting_info, in_progress, resolved, rejected + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + resolved_at DATETIME +); + +-- Настройки бота для каждого проекта +CREATE TABLE support_bot_config ( + project_id TEXT PRIMARY KEY REFERENCES projects(id), + telegram_bot_token TEXT, -- токен Telegram-бота (encrypted) + welcome_message TEXT, -- приветствие + faq JSON, -- часто задаваемые вопросы + auto_reply BOOLEAN DEFAULT FALSE, -- автоматически отвечать клиентам? + require_approval BOOLEAN DEFAULT TRUE, -- требовать одобрение ответов? 
+ brand_voice TEXT, -- стиль общения ("формальный", "дружелюбный") + forbidden_topics JSON, -- что нельзя обсуждать с клиентами + escalation_keywords JSON -- триггеры для эскалации к тебе +); + +CREATE INDEX idx_tickets_project ON support_tickets(project_id, status); +CREATE INDEX idx_tickets_client ON support_tickets(client_id); +``` + +**Итого: ~24 специализации в 9 отделах:** + +| Отдел | Роли | PM вызывает когда | +|-------|------|-------------------| +| Исследования | business_analyst, market_researcher, tech_researcher | Новый проект, новый модуль, выбор технологии | +| Дизайн | ux_designer, ui_designer | Новый модуль, редизайн, улучшение UX | +| Маркетинг | marketer, copywriter, seo_specialist | Запуск, лендинги, SEO, контент | +| Проектирование | architect, db_architect | Новый модуль, рефакторинг, масштабирование | +| Разработка | frontend_dev, backend_dev, fullstack_dev | Реализация | +| Качество | debugger, reviewer, tester, qa_analyst | Баги, ревью, тесты | +| Инфраструктура | sysadmin, devops, security | Деплой, CI/CD, безопасность | +| Юридическая | legal, legal_researcher | Коммерция, ПД, платежи, оферты, лицензии | +| Саппорт | support, support_guard | Обратная связь от клиентов | + +PM не вызывает всех — он собирает команду под задачу: +- Мелкий баг от клиента: support → PM → debugger → tester → frontend_dev +- Новый модуль с платежами: legal → marketer → ux → architect → security → devs → review +- Новый коммерческий проект: analyst → researchers → legal → ux → marketer → architect +- Внутренний инструмент: architect → fullstack_dev → tester (без юриста и маркетолога) + +**Разделение прав (КРИТИЧНО):** +- Исследователи/аналитики: WebSearch + Read + Write (документы). Не трогают код. +- Дизайнеры: Read + Write (спеки, wireframes). Не трогают код. +- Маркетологи: WebSearch + Read + Write. Не трогают код — дают требования. +- Архитектор: Read + Write (спеки). Не пишет код. +- Дебагер: Read + Bash. Ищет, НЕ правит. +- Ревьюер: ТОЛЬКО Read. Не трогает. 
+- Разработчики: полный доступ, но только к своему модулю. +- Сисадмин/DevOps: полный доступ к инфраструктуре. +- Безопасник: Read + Bash. Не правит — выдаёт требования. +- Саппорт: Read + Write (тикеты). Не трогает код, не принимает решений. +- Support Guard: ТОЛЬКО Read. Фильтр — пропускает/блокирует/эскалирует. + +### 3.5 Протокол обмена между агентами + +Агенты общаются ТОЛЬКО через структурированные артефакты в БД. +Никакого shared context. Каждый артефакт — JSON файл + запись в tasks. + +**Универсальный формат передачи:** +```json +{ + "task_id": "VDOL-042", + "from_role": "pm", + "to_role": "debugger", + "type": "debug_request", + "payload": { + "bug_description": "Фильтры поиска не применяются при быстром переключении", + "module": "search", + "affected_files": [ + "src/components/search/SearchFilters.vue", + "src/composables/useSearch.ts", + "src/api/search.ts" + ], + "known_context": [ + "Фильтры используют async API вызовы", + "Раньше был похожий баг с debounce (decision #15)" + ], + "reproduction_steps": "Быстро кликнуть 3 разных фильтра подряд" + } +} +``` + +**PM формирует payload, подтягивая из decisions:** +```python +# context_builder собирает для дебагера +context = { + "task": db.get_task("VDOL-042"), + "module_files": git.list_files("src/components/search/"), + "relevant_decisions": db.get_decisions( + project_id="vdol", + category="search", + types=["gotcha", "workaround"] + ), + "recent_bugs": db.get_tasks( + project_id="vdol", + module="search", + status="done", + type="debug", + limit=5 + ) +} +``` + +### 3.6 Коммуникация между рабочими агентами + +Рабочие агенты могут "общаться", но не напрямую — через PM как посредника, +или через артефакты в файловой системе: + +``` +[Дебагер] → пишет debug_report.json → [PM читает] + PM решает: "нужен фронтендер для фикса" +[PM] → формирует fix_request.json (debug_report + spec) → [Фронтендер] +[Фронтендер] → правит код → [Тестировщик] запускает тесты +[Тестировщик] → test_result.json → [PM] + PM решает: 
"тесты прошли, закрываю задачу" или "фейл, обратно фронтендеру" +``` + +**НО: для скорости можно разрешить прямую цепочку без PM:** + +``` +# PM заранее описывает pipeline +kin run VDOL-042 --pipeline "debugger → tester → frontend_dev → tester" +# Каждый агент передаёт результат следующему через файл +# PM получает только финальный результат + все промежуточные в логах +``` + +### 3.7 Механизм запуска (как это работает технически) + +```bash +# Сценарий 1: ты пишешь в Telegram +"продебажь фильтры в vdolipoperek" + +# Диспетчер (Python): +# 1. Парсит: проект=vdol, тип=debug, что=фильтры +# 2. Запускает PM проекта: + +claude -p "$(cat prompts/pm.md) + +ПРОЕКТ: vdolipoperek +TECH STACK: Vue 3, TypeScript, Nuxt +ТЕКУЩИЕ DECISIONS: +$(kin decisions vdol --category search --format brief) + +ЗАДАЧА: продебажь фильтры — не применяются при быстром переключении +ДОСТУПНЫЕ СПЕЦИАЛИСТЫ: debugger, frontend_dev, backend_dev, tester, reviewer, security +ШАБЛОНЫ МАРШРУТОВ: $(cat routes.yaml) + +Декомпозируй задачу и верни JSON с pipeline." 
\ + --session-id "pm-vdol-$(date +%s)" \ + --output-format json + +# PM возвращает: +{ + "task_id": "VDOL-043", + "pipeline": [ + {"role": "debugger", "module": "search", "brief": "..."}, + {"role": "tester", "depends_on": "debugger", "brief": "regression test"}, + {"role": "frontend_dev", "depends_on": "tester", "brief": "fix"}, + {"role": "tester", "depends_on": "frontend_dev", "brief": "verify fix"} + ], + "decisions_to_load": [15, 23] # PM знает какие decisions релевантны +} + +# Runner исполняет pipeline: +for step in pipeline: + context = context_builder.build(step.role, step.module, step.decisions) + result = claude_run(step.role, context, project_path) + save_result(step, result) + if not result.success: + escalate_to_pm(step, result) # PM решает что делать +``` + +```bash +# Сценарий 2: новый проект +"нужен агрегатор туров, чтобы парсить предложения и показывать клиентам" + +# Диспетчер определяет: тип=new_project +# Запускает Аналитика (БЕЗ PM, потому что проекта ещё нет): + +claude -p "$(cat prompts/analyst.md) +Исследуй тему: агрегатор туров для турагентства. +Контекст: существующий сайт vdolipoperek.com (Vue/Nuxt). +Нужно: конкуренты, доступные API поставщиков туров, ценообразование, +технические ограничения. Верни market_research.json" \ + --session-id "analyst-new-$(date +%s)" \ + --tools WebSearch,WebFetch,Write + +# Аналитик может спавнить исследователей: +# - "исследователь API" — ищет API TUI, Pegas, Anex... +# - "исследователь конкурентов" — анализирует level.travel, onlinetours... 
+ +# После: Архитектор получает research + хотелки: +claude -p "$(cat prompts/architect.md) +ИССЛЕДОВАНИЕ: $(cat market_research.json) +ХОТЕЛКИ: агрегатор туров, парсинг предложений, отображение клиентам +СУЩЕСТВУЮЩИЙ СТЕК: Vue 3, Nuxt, Node.js +Спроектируй project_blueprint.json" \ + --session-id "arch-new-$(date +%s)" + +# Blueprint → создаётся проект в БД → назначается PM → работа начинается +``` + +### 3.5 State Management + +**SQLite база — мультипроектная с рождения:** + +```sql +-- Проекты (центральный реестр) +CREATE TABLE projects ( + id TEXT PRIMARY KEY, -- 'vdol', 'sharedbox', 'neverdns', 'barsik', 'askai' + name TEXT NOT NULL, -- 'В долю поперёк', 'SharedBox', 'NeverDNS' + path TEXT NOT NULL, -- ~/projects/mailbox, ~/projects/vdolipoperek + tech_stack JSON, -- ["vue3", "typescript", "nuxt"] + status TEXT DEFAULT 'active', -- active, paused, maintenance, ready + priority INTEGER DEFAULT 5, -- 1=критический, 10=когда-нибудь + pm_prompt TEXT, -- путь к кастомному промпту PM для этого проекта + claude_md_path TEXT, -- путь к CLAUDE.md проекта + forgejo_repo TEXT, -- owner/repo для синхронизации issues + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Задачи (привязаны к проекту) +CREATE TABLE tasks ( + id TEXT PRIMARY KEY, -- VDOL-042, SB-015, NDNS-003 + project_id TEXT NOT NULL REFERENCES projects(id), + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', -- pending, decomposed, in_progress, review, done, blocked + priority INTEGER DEFAULT 5, + assigned_role TEXT, -- architect, developer, reviewer, tester, security + parent_task_id TEXT REFERENCES tasks(id), -- для подзадач + brief JSON, -- Task Brief от PM + spec JSON, -- Implementation Spec от архитектора + review JSON, -- Review Result + test_result JSON, -- Test Result + security_result JSON, -- Security Check Result + forgejo_issue_id INTEGER, -- связка с Forgejo issue + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Решения и грабли 
(КЛЮЧЕВАЯ ТАБЛИЦА — то что теряется при compaction) +-- Это ВНЕШНЯЯ ПАМЯТЬ PM-агента для каждого проекта +CREATE TABLE decisions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), -- может быть NULL для общепроектных решений + type TEXT NOT NULL, -- 'decision', 'gotcha', 'workaround', 'rejected_approach', 'convention' + category TEXT, -- 'architecture', 'ui', 'api', 'security', 'devops', 'performance' + title TEXT NOT NULL, + description TEXT NOT NULL, + tags JSON, -- ["ios-safari", "css", "bottom-sheet"] для поиска + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Логи агентов (для дебага, обучения и cost tracking) +CREATE TABLE agent_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), + agent_role TEXT NOT NULL, -- pm, analyst, architect, debugger, frontend_dev, etc. + session_id TEXT, -- claude --session-id + action TEXT NOT NULL, -- 'decompose', 'implement', 'review', 'test', 'fix', 'research' + input_summary TEXT, -- что получил (краткое описание, не полный текст) + output_summary TEXT, -- что выдал + tokens_used INTEGER, + model TEXT, -- haiku, sonnet, opus + cost_usd REAL, -- стоимость вызова + success BOOLEAN, + error_message TEXT, + duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Модули проекта (PM знает структуру) +-- Это "карта" проекта для PM: он знает какие модули есть и кого вызвать +CREATE TABLE modules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, -- 'search', 'auth', 'payments', 'ui-kit' + type TEXT NOT NULL, -- 'frontend', 'backend', 'shared', 'infra' + path TEXT NOT NULL, -- 'src/components/search/', 'src/api/search.ts' + description TEXT, -- 'Поиск и фильтрация туров' + owner_role TEXT, -- 'frontend_dev', 'backend_dev' — кого вызывать + dependencies JSON, -- ["auth", 
"api-client"] — зависимости между модулями + UNIQUE(project_id, name) +); + +-- Pipelines (история запусков — для обучения и повторного использования) +CREATE TABLE pipelines ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL REFERENCES tasks(id), + project_id TEXT NOT NULL REFERENCES projects(id), + route_type TEXT NOT NULL, -- 'debug', 'feature', 'refactor', 'hotfix', 'new_project' + steps JSON NOT NULL, -- pipeline JSON от PM + status TEXT DEFAULT 'running', -- running, completed, failed, cancelled + total_cost_usd REAL, + total_tokens INTEGER, + total_duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + completed_at DATETIME +); + +-- Кросс-проектные зависимости и связи +CREATE TABLE project_links ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + from_project TEXT NOT NULL REFERENCES projects(id), + to_project TEXT NOT NULL REFERENCES projects(id), + type TEXT NOT NULL, -- 'depends_on', 'shares_component', 'blocks' + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Индексы для быстрого доступа PM-агентов +CREATE INDEX idx_tasks_project_status ON tasks(project_id, status); +CREATE INDEX idx_decisions_project ON decisions(project_id); +CREATE INDEX idx_decisions_tags ON decisions(tags); -- для JSON-поиска по тегам +CREATE INDEX idx_agent_logs_project ON agent_logs(project_id, created_at); +CREATE INDEX idx_agent_logs_cost ON agent_logs(project_id, cost_usd); +``` + +### 3.6 Контекст-билдер (КЛЮЧЕВОЙ КОМПОНЕНТ) + +Перед запуском любого агента, система собирает контекст из БД: + +``` +kin run VDOL-042 + │ + ▼ +[context-builder] + │ + ├── Читает task VDOL-042 из tasks table → brief, spec + ├── Читает decisions WHERE project_id='vdol' → релевантные грабли + │ (фильтрует по category и tags, не грузит ВСЕ решения) + ├── Читает projects WHERE id='vdol' → tech_stack, claude_md_path + ├── Формирует МИНИМАЛЬНЫЙ контекст для конкретной роли: + │ + │ Для архитектора: brief + ALL decisions (он должен знать историю) + 
│ Для программиста: spec + decisions WHERE category IN ('gotcha','workaround') + │ Для ревьюера: spec + код + decisions WHERE type='convention' + │ Для тестировщика: spec + код (минимум) + │ Для безопасника: код + security conventions + │ + └── Запускает claude -p с собранным контекстом +``` + +**Это решает проблему "раздувшихся CLAUDE.md":** контекст собирается динамически и фильтруется по роли. + +### 3.7 Meta-PM: обзор всех проектов + +Meta-PM — самый "тупой" но самый полезный агент. Он работает с VIEW-запросами к БД: + +```sql +-- "Что горит?" — для Meta-PM +SELECT p.name, p.priority, p.status, + COUNT(CASE WHEN t.status = 'blocked' THEN 1 END) as blocked_tasks, + COUNT(CASE WHEN t.status = 'in_progress' THEN 1 END) as active_tasks, + COUNT(CASE WHEN t.status = 'pending' THEN 1 END) as pending_tasks, + MAX(t.updated_at) as last_activity +FROM projects p +LEFT JOIN tasks t ON t.project_id = p.id +WHERE p.status = 'active' +GROUP BY p.id +ORDER BY p.priority ASC, blocked_tasks DESC; + +-- "Сколько трачу?" — cost tracking +SELECT p.name, + SUM(al.cost_usd) as total_cost, + SUM(al.tokens_used) as total_tokens, + COUNT(*) as agent_calls +FROM agent_logs al +JOIN projects p ON p.id = al.project_id +WHERE al.created_at > datetime('now', '-7 days') +GROUP BY p.id +ORDER BY total_cost DESC; +``` + +### 3.8 Компоненты + +``` +kin/ +├── core/ +│ ├── db.py -- SQLite init, migrations +│ ├── models.py -- Projects, Tasks, Decisions, Modules, Pipelines +│ ├── context_builder.py -- формирование контекста ПО РОЛИ из БД +│ └── api.py -- REST API для GUI (FastAPI, читает ту же SQLite) +│ +├── agents/ +│ ├── prompts/ -- ~24 промпта (pm.md, architect.md, debugger.md...) +│ ├── routes.yaml -- шаблоны маршрутов (debug, feature, refactor...) 
+│ ├── specialists.yaml -- пул ролей с tools, model, context rules +│ └── runner.py -- запуск claude -p, pipeline executor +│ +├── cli/ +│ └── main.py +│ # kin status — все проекты одним взглядом +│ # kin run VDOL-043 — PM декомпозирует + pipeline +│ # kin run VDOL-043 --dry-run — показать pipeline без запуска +│ # kin ask "что горит?" — Intake отвечает +│ # kin support vdol "текст" — тикет от клиента +│ # kin cost --last 7d — расходы +│ # kin new-project "агрегатор" — analyst → architect → PM +│ +├── web/ -- GUI (Vue 3 + TypeScript — твой стек!) +│ ├── src/ +│ │ ├── views/ +│ │ │ ├── Dashboard.vue -- обзор всех проектов +│ │ │ ├── ProjectView.vue -- один проект: задачи, модули, decisions +│ │ │ ├── PipelineView.vue -- pipeline задачи: кто работает, где блокер +│ │ │ ├── CostView.vue -- расходы по проектам и задачам +│ │ │ └── SupportView.vue -- тикеты от клиентов +│ │ ├── components/ +│ │ │ ├── ProjectCard.vue -- карточка проекта со статусом +│ │ │ ├── PipelineGraph.vue -- визуализация pipeline (граф агентов) +│ │ │ ├── AgentStatus.vue -- статус агента (idle/working/done/error) +│ │ │ ├── DecisionsList.vue -- decisions проекта с поиском по тегам +│ │ │ └── LiveLog.vue -- real-time лог текущего pipeline +│ │ └── App.vue +│ └── package.json +│ +├── integrations/ +│ ├── telegram_bot.py -- бот-интерфейс (для тебя + клиентские боты) +│ └── forgejo_sync.py -- двусторонняя синхронизация issues ↔ tasks +│ +├── config/ +│ └── projects.yaml -- начальная конфигурация проектов +│ +└── kin.db -- SQLite база (единственный source of truth) +``` + +### 3.9 GUI: что нужно видеть + +**Dashboard (главный экран):** +``` +┌─────────────────────────────────────────────────────────────┐ +│ Kin Dashboard Cost: $47/week │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ 🔴 vdolipoperek 3 active 1 blocked $12/week │ +│ └─ VDOL-043: debug фильтров [████░░] debugger → tester │ +│ └─ VDOL-044: mobile bottom-sheet [██████] done ✓ │ +│ └─ VDOL-045: оплата [░░░░░░] 
blocked: ждёт юриста │ +│ │ +│ 🟡 sharedbox 1 active $8/week │ +│ └─ SB-016: multi-tenant isolation [██░░░░] architect │ +│ │ +│ 🟢 neverdns 0 active $0/week │ +│ └─ маркетинг-фаза, ждёт контент │ +│ │ +│ 🟢 barsik 1 active $5/week │ +│ └─ BARS-007: RAG pipeline [████░░] backend_dev │ +│ │ +│ ⚪ askai 0 active $0/week │ +│ ⚪ ddfo 0 active $0/week │ +│ ⚪ stopleak 0 active $0/week │ +│ │ +│ ─── Support ─── │ +│ 2 новых тикета (vdolipoperek) │ +│ 1 ожидает твоего approve │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Pipeline View (конкретная задача):** +``` +┌─────────────────────────────────────────────────────────────┐ +│ VDOL-043: Debug фильтров поиска Status: in_progress│ +│ Priority: high Cost: $1.82 Duration: 12 min │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ [PM] ──────► [Debugger] ──────► [Tester] ──────► [Frontend]│ +│ ✓ 0.3s ✓ $0.45 ● working ○ pending│ +│ decomposed found: writing │ +│ pipeline race condition regression │ +│ in useSearch.ts test │ +│ │ +│ Decisions добавлены: │ +│ #47: "race condition в async фильтре — AbortController" │ +│ │ +│ ─── Live Log ─── │ +│ 12:04:32 [tester] Запущен: session tst-VDOL043-1710... │ +│ 12:04:33 [tester] Читает: src/composables/useSearch.ts │ +│ 12:04:45 [tester] Пишет: tests/search.filter.spec.ts │ +│ 12:05:01 [tester] Bash: npm run test -- search.filter │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Почему Vue 3:** это твой стек, ты на нём строишь vdolipoperek. +GUI Kin — это тоже проект, который Kin может помогать разрабатывать. +Meta-moment: Kin строит свой собственный GUI. 
+ +**Архитектура GUI:** +``` +[kin.db] ← SQLite (source of truth) + │ + ├── [core/api.py] ← FastAPI, REST endpoints + │ GET /projects — список проектов со статусами + │ GET /projects/{id} — детали проекта + задачи + modules + │ GET /tasks/{id} — задача + pipeline + agent logs + │ GET /tasks/{id}/live — SSE stream для live log + │ GET /pipelines/{id} — граф pipeline с статусами + │ GET /decisions?project=X — decisions с фильтрами + │ GET /support/tickets — тикеты от клиентов + │ GET /cost?period=7d — расходы + │ POST /tasks — создать задачу + │ POST /tasks/{id}/run — запустить pipeline + │ POST /support/approve/{id} — одобрить ответ клиенту + │ + └── [web/] ← Vue 3 + TypeScript, Vite + Подключается к API + SSE для live обновлений pipeline + Responsive (работает с MacBook и с телефона) +``` + +**Ключевое:** GUI читает ту же SQLite что и CLI/runner. +Нет отдельной базы для GUI, нет sync проблем. +runner.py пишет в kin.db → API читает → Vue показывает. +Real-time через SSE (Server-Sent Events) — runner пишет лог → API стримит → Vue обновляет. + +### 3.10 Интеграция с существующей инфраструктурой + +- **Forgejo**: Двусторонний sync — issue создано в Forgejo → task в kin, task завершён → issue закрыт. Forgejo остаётся UI для ручного просмотра. +- **Obsidian**: Decisions из БД экспортируются как .md в vault. Kanban-доска читает задачи. Направление: kin → Obsidian (read-only зеркало). +- **Telegram бот**: Основной мобильный интерфейс. Свободная форма: "продебажь фильтры в vdolipoperek" → dispatcher парсит → PM → pipeline. +- **Mac Mini M4 Pro**: Основной хост. Агенты запускаются как процессы на нём. +- **MacBook**: Через SSH + Telegram бот. Или Syncthing синхронизирует kin.db (receive-only на MacBook). +- **CLAUDE.md per project**: Минимальный (30 строк), содержит ТОЛЬКО: "tech stack", "coding conventions", "ссылка на kin для контекста". Decisions НЕ дублируются. 
+
+### 3.11 Ключевые отличия от Ruflo
+
+| Аспект | Ruflo | Kin |
+|--------|-------|---------|
+| Мультипроектность | Нет | Intake + Project PMs |
+| Полнота команды | Только dev | ~24 роли: research, design, marketing, dev, QA, ops, support |
+| Маршрутизация | Фиксированная | PM динамически собирает команду |
+| Изоляция | Один промпт | Каждый агент = отдельный процесс |
+| Обратная связь | Нет | Support → Guard → PM → команда → ответ клиенту |
+| Клиентские боты | Нет | Telegram per project (перспектива) |
+
+---
+
+## ЧАСТЬ 4: План действий
+
+### Фаза 1: Фундамент + один проект (2-3 дня)
+- [ ] SQLite схема (все таблицы включая support)
+- [ ] context-builder, runner.py, pipeline executor
+- [ ] Intake-агент, PM, routes.yaml, specialists.yaml
+- [ ] Базовые промпты: architect, frontend_dev, debugger, tester, reviewer
+- [ ] CLI + тест на vdolipoperek.com
+
+### Фаза 2: Полная команда ~24 роли (2-3 дня)
+- [ ] Все промпты, разделение прав
+- [ ] Тест полной цепочки marketer → ux → architect → dev → review
+
+### Фаза 3: Все 10 проектов (1-2 дня)
+### Фаза 4: Telegram интеграция (1-2 дня)
+### Фаза 5: Саппорт + Support Guard (1-2 дня)
+### Фаза 6: Forgejo + Obsidian sync (1 день)
+### Фаза 7: Боевой прогон на vdolipoperek (1-2 недели)
+### Фаза 8: Клиентские Telegram-боты (перспектива)
+### Фаза 9: Самоподдерживающиеся проекты (далёкая перспектива)
+
+---
+
+## Заметки
+
+**Архитектура:** Изоляция контекста через процессы. Decisions = внешняя память PM. PM тупой/памятливый, workers умные/забывчивые. context-builder фильтрует по роли. ~24 роли = полная софтверная компания.
+
+**Бизнес:** Полнота > скорость. Продукт = коммерческий, не поделка. Саппорт замыкает цикл. Support Guard критичен. В перспективе проекты живут сами.
+
+**Техника:** Python, SQLite (source of truth), cost tracking встроен, Forgejo sync, Obsidian read-only.
+
+**Из Ruflo взять:** MCP, SQLite memory, model routing, ADR, hooks. **НЕ брать:** fake parallelism, over-engineering. 
diff --git a/agents/__init__.py b/agents/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/bootstrap.py b/agents/bootstrap.py new file mode 100644 index 0000000..ecd79d7 --- /dev/null +++ b/agents/bootstrap.py @@ -0,0 +1,711 @@ +""" +Kin bootstrap — auto-detect project tech stack, modules, and decisions. +Scans project directory, CLAUDE.md, and optionally Obsidian vault. +Writes results to kin.db via core.models. +""" + +import json +import re +from pathlib import Path +from typing import Any + +DEFAULT_VAULT = Path.home() / "Library" / "Mobile Documents" / "iCloud~md~obsidian" / "Documents" + +# --------------------------------------------------------------------------- +# Tech stack detection +# --------------------------------------------------------------------------- + +# package.json dependency → tech label +_NPM_MARKERS = { + "vue": "vue3", "nuxt": "nuxt3", "react": "react", "next": "nextjs", + "svelte": "svelte", "angular": "angular", + "typescript": "typescript", "vite": "vite", "webpack": "webpack", + "express": "express", "fastify": "fastify", "koa": "koa", + "pinia": "pinia", "vuex": "vuex", "redux": "redux", + "tailwindcss": "tailwind", "prisma": "prisma", "drizzle-orm": "drizzle", + "pg": "postgresql", "mysql2": "mysql", "better-sqlite3": "sqlite", + "axios": "axios", "puppeteer": "puppeteer", "playwright": "playwright", +} + +# Config files → tech label +_FILE_MARKERS = { + "nuxt.config.ts": "nuxt3", "nuxt.config.js": "nuxt3", + "vite.config.ts": "vite", "vite.config.js": "vite", + "tsconfig.json": "typescript", + "tailwind.config.js": "tailwind", "tailwind.config.ts": "tailwind", + "docker-compose.yml": "docker", "docker-compose.yaml": "docker", + "Dockerfile": "docker", + "go.mod": "go", "Cargo.toml": "rust", + "requirements.txt": "python", "pyproject.toml": "python", + "setup.py": "python", "Pipfile": "python", + ".eslintrc.js": "eslint", ".prettierrc": "prettier", +} + + +_SKIP_DIRS = {"node_modules", ".git", "dist", 
".next", ".nuxt", "__pycache__", ".venv", "venv"} + + +def detect_tech_stack(project_path: Path) -> list[str]: + """Detect tech stack from project files. + + Searches recursively up to depth 3, skipping node_modules/.git/dist. + Falls back to CLAUDE.md heuristics if no files found. + """ + stack: set[str] = set() + + # Recursive search for config files and package.json (depth ≤ 3) + for fpath in _walk_files(project_path, max_depth=3): + fname = fpath.name + if fname in _FILE_MARKERS: + stack.add(_FILE_MARKERS[fname]) + if fname == "package.json": + stack.update(_parse_package_json(fpath)) + if fname == "requirements.txt": + stack.update(_parse_requirements_txt(fpath)) + if fname == "go.mod": + stack.add("go") + try: + text = fpath.read_text(errors="replace") + if "gin-gonic" in text: + stack.add("gin") + if "fiber" in text: + stack.add("fiber") + except OSError: + pass + + # Fallback: extract tech hints from CLAUDE.md if no config files found + if not stack: + stack.update(_detect_stack_from_claude_md(project_path)) + + return sorted(stack) + + +# CLAUDE.md text → tech labels (for fallback when project files are on a remote server) +_CLAUDE_MD_TECH_HINTS = { + r"(?i)vue[\s.]?3": "vue3", r"(?i)vue[\s.]?2": "vue2", + r"(?i)\bnuxt\b": "nuxt3", r"(?i)\breact\b": "react", + r"(?i)\btypescript\b": "typescript", r"(?i)\bvite\b": "vite", + r"(?i)\btailwind": "tailwind", + r"(?i)node\.?js": "nodejs", r"(?i)\bexpress\b": "express", + r"(?i)postgresql|postgres": "postgresql", + r"(?i)\bsqlite\b": "sqlite", r"(?i)\bmysql\b": "mysql", + r"(?i)\bdocker\b": "docker", + r"(?i)\bpython\b": "python", r"(?i)\bfastapi\b": "fastapi", + r"(?i)\bdjango\b": "django", r"(?i)\bflask\b": "flask", + r"(?i)\bgo\b.*(?:gin|fiber|module)": "go", + r"(?i)\bnginx\b": "nginx", + r"(?i)\bpinia\b": "pinia", r"(?i)\bvuex\b": "vuex", +} + + +def _detect_stack_from_claude_md(project_path: Path) -> list[str]: + """Fallback: infer tech stack from CLAUDE.md text when no config files exist.""" + claude_md = 
project_path / "CLAUDE.md" + if not claude_md.exists(): + return [] + try: + text = claude_md.read_text(errors="replace")[:5000] # First 5KB is enough + except OSError: + return [] + stack = [] + for pattern, tech in _CLAUDE_MD_TECH_HINTS.items(): + if re.search(pattern, text): + stack.append(tech) + return stack + + +def _walk_files(root: Path, max_depth: int = 3, _depth: int = 0): + """Yield files up to max_depth, skipping node_modules/dist/.git.""" + if _depth > max_depth: + return + try: + entries = sorted(root.iterdir()) + except (OSError, PermissionError): + return + for entry in entries: + if entry.is_file(): + yield entry + elif entry.is_dir() and entry.name not in _SKIP_DIRS and not entry.name.startswith("."): + yield from _walk_files(entry, max_depth, _depth + 1) + + +def _parse_package_json(path: Path) -> list[str]: + """Extract tech labels from package.json.""" + try: + data = json.loads(path.read_text(errors="replace")) + except (json.JSONDecodeError, OSError): + return [] + stack = [] + all_deps = {} + for key in ("dependencies", "devDependencies"): + all_deps.update(data.get(key, {})) + for dep_name, tech in _NPM_MARKERS.items(): + if dep_name in all_deps: + stack.append(tech) + return stack + + +def _parse_requirements_txt(path: Path) -> list[str]: + """Extract tech labels from requirements.txt.""" + markers = { + "fastapi": "fastapi", "flask": "flask", "django": "django", + "sqlalchemy": "sqlalchemy", "celery": "celery", "redis": "redis", + "pydantic": "pydantic", "click": "click", "pytest": "pytest", + } + stack = [] + try: + text = path.read_text(errors="replace").lower() + except OSError: + return stack + for pkg, tech in markers.items(): + if pkg in text: + stack.append(tech) + return stack + + +def _is_inside_node_modules(path: Path, root: Path) -> bool: + rel = path.relative_to(root) + return "node_modules" in rel.parts + + +# --------------------------------------------------------------------------- +# Module detection +# 
--------------------------------------------------------------------------- + +_FRONTEND_EXTS = {".vue", ".jsx", ".tsx", ".svelte"} +_BACKEND_MARKERS = {"express", "fastify", "koa", "router", "controller", "middleware"} + + +def detect_modules(project_path: Path) -> list[dict]: + """Scan for modules: checks root subdirs, */src/ patterns, standard names. + + Strategy: + 1. Find all "source root" dirs (src/, app/, lib/ at root or inside top-level dirs) + 2. Each first-level subdir of a source root = a module candidate + 3. Top-level dirs with their own src/ are treated as component roots + (e.g. frontend/, backend-pg/) — scan THEIR src/ for modules + """ + modules = [] + scan_dirs: list[tuple[Path, str | None]] = [] # (dir, prefix_hint) + + # Direct source dirs in root + for name in ("src", "app", "lib"): + d = project_path / name + if d.is_dir(): + scan_dirs.append((d, None)) + + # Top-level component dirs (frontend/, backend/, backend-pg/, server/, client/) + # These get scanned for src/ inside, or directly if they contain source files + for child in sorted(project_path.iterdir()): + if not child.is_dir() or child.name in _SKIP_DIRS or child.name.startswith("."): + continue + child_src = child / "src" + if child_src.is_dir(): + # e.g. 
frontend/src/, backend-pg/src/ — scan their subdirs + scan_dirs.append((child_src, child.name)) + elif child.name in ("frontend", "backend", "server", "client", "web", "api"): + # No src/ but it's a known component dir — scan it directly + scan_dirs.append((child, child.name)) + + seen = set() + for scan_dir, prefix in scan_dirs: + for child in sorted(scan_dir.iterdir()): + if not child.is_dir() or child.name in _SKIP_DIRS or child.name.startswith("."): + continue + mod = _analyze_module(child, project_path) + key = (mod["name"], mod["path"]) + if key not in seen: + seen.add(key) + modules.append(mod) + + return modules + + +def _analyze_module(dir_path: Path, project_root: Path) -> dict: + """Analyze a directory to determine module type and file count.""" + rel_path = str(dir_path.relative_to(project_root)) + "/" + files = list(dir_path.rglob("*")) + source_files = [f for f in files if f.is_file() and not f.name.startswith(".")] + file_count = len(source_files) + + # Determine type + exts = {f.suffix for f in source_files} + mod_type = _guess_module_type(dir_path, exts, source_files) + + return { + "name": dir_path.name, + "type": mod_type, + "path": rel_path, + "file_count": file_count, + } + + +def _guess_module_type(dir_path: Path, exts: set[str], files: list[Path]) -> str: + """Guess if module is frontend, backend, shared, or infra.""" + # Obvious frontend + if exts & _FRONTEND_EXTS: + return "frontend" + + # Check file contents for backend markers + has_backend_marker = False + for f in files[:20]: # Sample first 20 files + if f.suffix in (".ts", ".js", ".mjs"): + try: + text = f.read_text(errors="replace")[:2000] + text_lower = text.lower() + if any(m in text_lower for m in _BACKEND_MARKERS): + has_backend_marker = True + break + except OSError: + continue + + if has_backend_marker: + return "backend" + + # Infra patterns + name = dir_path.name.lower() + if name in ("infra", "deploy", "scripts", "ci", "docker", "nginx", "config"): + return "infra" + + # 
Shared by default if ambiguous + if exts & {".ts", ".js", ".py"}: + return "shared" + + return "shared" + + +# --------------------------------------------------------------------------- +# Decisions from CLAUDE.md +# --------------------------------------------------------------------------- + +_DECISION_PATTERNS = [ + (r"(?i)\b(GOTCHA|ВАЖНО|WARNING|ВНИМАНИЕ)[:\s]+(.*?)(?=\n[#\-]|\n\n|\Z)", "gotcha"), + (r"(?i)\b(WORKAROUND|ОБХОДНОЙ|ХАК)[:\s]+(.*?)(?=\n[#\-]|\n\n|\Z)", "workaround"), + (r"(?i)\b(FIXME|БАГИ?)[:\s]+(.*?)(?=\n[#\-]|\n\n|\Z)", "gotcha"), + (r"(?i)\b(РЕШЕНИЕ|DECISION)[:\s]+(.*?)(?=\n[#\-]|\n\n|\Z)", "decision"), + (r"(?i)\b(CONVENTION|СОГЛАШЕНИЕ|ПРАВИЛО)[:\s]+(.*?)(?=\n[#\-]|\n\n|\Z)", "convention"), +] + +# Section headers that likely contain decisions +_DECISION_SECTIONS = [ + r"(?i)known\s+issues?", r"(?i)workaround", r"(?i)gotcha", + r"(?i)решени[яе]", r"(?i)грабл[ия]", + r"(?i)conventions?", r"(?i)правила", r"(?i)нюансы", +] + +# Section headers about UNRELATED services — skip these entirely +_UNRELATED_SECTION_PATTERNS = [ + r"(?i)jitsi", r"(?i)nextcloud", r"(?i)prosody", + r"(?i)coturn", r"(?i)turn\b", r"(?i)asterisk", + r"(?i)ghost\s+блог", r"(?i)onlyoffice", + r"(?i)git\s+sync", r"(?i)\.env\s+добав", + r"(?i)goip\s+watcher", r"(?i)tbank\s+monitor", # monitoring services + r"(?i)фикс\s+удален", # commit-level fixes (not decisions) +] + +# Noise patterns — individual items that look like noise, not decisions +_NOISE_PATTERNS = [ + r"^[0-9a-f]{6,40}$", # commit hashes + r"^\s*(docker|ssh|scp|git|curl|sudo)\s", # shell commands + r"^`[^`]+`$", # inline code-only items + r"(?i)(prosody|jitsi|jicofo|jvb|coturn|nextcloud|onlyoffice|ghost)", # unrelated services + r"(?i)\.jitsi-meet-cfg", # jitsi config paths + r"(?i)(meet\.jitsi|sitemeet\.org)", # jitsi domains + r"(?i)(cloud\.vault\.red|office\.vault)", # nextcloud domains + r"(?i)JWT_APP_(ID|SECRET)", # jwt config lines + r"(?i)XMPP_", # prosody config + r"\(коммит\s+`?[0-9a-f]+`?\)", # "(коммит 
`a33c2b9`)" references + r"(?i)known_uids|idle_loop|reconnect", # goip-watcher internals +] + + +def _is_noise(text: str) -> bool: + """Check if a decision candidate is noise.""" + # Clean markdown bold for matching + clean = re.sub(r"\*\*([^*]*)\*\*", r"\1", text).strip() + return any(re.search(p, clean) for p in _NOISE_PATTERNS) + + +def _split_into_sections(text: str) -> list[tuple[str, str]]: + """Split markdown into (header, body) pairs by ## headers. + + Returns list of (header_text, body_text) tuples. + Anything before the first ## is returned with header="". + """ + parts = re.split(r"(?m)^(##\s+.+)$", text) + sections = [] + current_header = "" + current_body = parts[0] if parts else "" + + for i in range(1, len(parts), 2): + if current_header or current_body.strip(): + sections.append((current_header, current_body)) + current_header = parts[i].strip() + current_body = parts[i + 1] if i + 1 < len(parts) else "" + + if current_header or current_body.strip(): + sections.append((current_header, current_body)) + + return sections + + +def _is_unrelated_section(header: str) -> bool: + """Check if a section header is about an unrelated service.""" + return any(re.search(p, header) for p in _UNRELATED_SECTION_PATTERNS) + + +def extract_decisions_from_claude_md( + project_path: Path, + project_id: str | None = None, + project_name: str | None = None, +) -> list[dict]: + """Parse CLAUDE.md for decisions, gotchas, workarounds. + + Filters out: + - Sections about unrelated services (Jitsi, Nextcloud, Prosody, etc.) 
+ - Noise: commit hashes, docker/ssh commands, paths to external services + - If CLAUDE.md has multi-project sections, only extracts for current project + """ + claude_md = project_path / "CLAUDE.md" + if not claude_md.exists(): + return [] + + try: + text = claude_md.read_text(errors="replace") + except OSError: + return [] + + # Split into sections and filter out unrelated ones + sections = _split_into_sections(text) + relevant_text = [] + for header, body in sections: + if _is_unrelated_section(header): + continue + relevant_text.append(header + "\n" + body) + + filtered_text = "\n".join(relevant_text) + + decisions = [] + seen_titles = set() + + # Pattern-based extraction from relevant sections only + for pattern, dec_type in _DECISION_PATTERNS: + for m in re.finditer(pattern, filtered_text, re.DOTALL): + body = m.group(2).strip() + if not body or len(body) < 10: + continue + lines = body.split("\n") + title = lines[0].strip().rstrip(".")[:100] + desc = body + if _is_noise(title) or _is_noise(desc): + continue + if title not in seen_titles: + seen_titles.add(title) + decisions.append({ + "type": dec_type, + "title": title, + "description": desc, + "category": _guess_category(title + " " + desc), + }) + + # Section-based extraction: find ### or #### headers matching decision patterns + sub_sections = re.split(r"(?m)^(#{1,4}\s+.*?)$", filtered_text) + for i, section in enumerate(sub_sections): + if any(re.search(pat, section) for pat in _DECISION_SECTIONS): + if i + 1 < len(sub_sections): + content = sub_sections[i + 1].strip() + for line in content.split("\n"): + line = line.strip() + # Numbered items (1. 
**text**) or bullet items + item = None + if re.match(r"^\d+\.\s+", line): + item = re.sub(r"^\d+\.\s+", "", line).strip() + elif line.startswith(("- ", "* ", "• ")): + item = line.lstrip("-*• ").strip() + + if not item or len(item) < 10: + continue + # Clean bold markers for title + clean = re.sub(r"\*\*([^*]+)\*\*", r"\1", item) + if _is_noise(clean): + continue + title = clean[:100] + if title not in seen_titles: + seen_titles.add(title) + decisions.append({ + "type": "gotcha", + "title": title, + "description": item, + "category": _guess_category(item), + }) + + return decisions + + +def _guess_category(text: str) -> str: + """Best-effort category guess from text content.""" + t = text.lower() + if any(w in t for w in ("css", "ui", "vue", "компонент", "стил", "layout", "mobile", "safari", "bottom-sheet")): + return "ui" + if any(w in t for w in ("api", "endpoint", "rest", "route", "запрос", "fetch")): + return "api" + if any(w in t for w in ("sql", "база", "миграц", "postgres", "sqlite", "бд", "schema")): + return "architecture" + if any(w in t for w in ("безопас", "security", "xss", "auth", "token", "csrf", "injection")): + return "security" + if any(w in t for w in ("docker", "deploy", "nginx", "ci", "cd", "infra", "сервер")): + return "devops" + if any(w in t for w in ("performance", "cache", "оптимиз", "lazy", "скорость")): + return "performance" + return "architecture" + + +# --------------------------------------------------------------------------- +# Obsidian vault scanning +# --------------------------------------------------------------------------- + +def find_vault_root(vault_path: Path | None = None) -> Path | None: + """Find the Obsidian vault root directory. + + If vault_path is given but doesn't exist, returns None (don't fallback). + If vault_path is None, tries the default iCloud Obsidian location. 
+ """ + if vault_path is not None: + return vault_path if vault_path.is_dir() else None + + # Default: iCloud Obsidian path + default = DEFAULT_VAULT + if default.is_dir(): + # Look for a vault inside (usually one level deep) + for child in default.iterdir(): + if child.is_dir() and not child.name.startswith("."): + return child + return None + + +def scan_obsidian( + vault_root: Path, + project_id: str, + project_name: str, + project_dir_name: str | None = None, +) -> dict: + """Scan Obsidian vault for project-related notes. + + Returns {"tasks": [...], "decisions": [...], "files_scanned": int} + """ + result = {"tasks": [], "decisions": [], "files_scanned": 0} + + # Build search terms + search_terms = {project_id.lower()} + if project_name: + search_terms.add(project_name.lower()) + if project_dir_name: + search_terms.add(project_dir_name.lower()) + + # Find project folder in vault + project_files: list[Path] = [] + for term in list(search_terms): + for child in vault_root.iterdir(): + if child.is_dir() and term in child.name.lower(): + for f in child.rglob("*.md"): + if f not in project_files: + project_files.append(f) + + # Also search for files mentioning the project by name + for md_file in vault_root.glob("*.md"): + try: + text = md_file.read_text(errors="replace")[:5000].lower() + except OSError: + continue + if any(term in text for term in search_terms): + if md_file not in project_files: + project_files.append(md_file) + + result["files_scanned"] = len(project_files) + + for f in project_files: + try: + text = f.read_text(errors="replace") + except OSError: + continue + + _extract_obsidian_tasks(text, f.stem, result["tasks"]) + _extract_obsidian_decisions(text, f.stem, result["decisions"]) + + return result + + +def _extract_obsidian_tasks(text: str, source: str, tasks: list[dict]): + """Extract checkbox items from Obsidian markdown.""" + for m in re.finditer(r"^[-*]\s+\[([ xX])\]\s+(.+)$", text, re.MULTILINE): + done = m.group(1).lower() == "x" + title = 
m.group(2).strip() + # Remove Obsidian wiki-links + title = re.sub(r"\[\[([^\]|]+)(?:\|[^\]]+)?\]\]", r"\1", title) + if len(title) > 5: + tasks.append({ + "title": title[:200], + "done": done, + "source": source, + }) + + +def _extract_obsidian_decisions(text: str, source: str, decisions: list[dict]): + """Extract decisions/gotchas from Obsidian notes.""" + for pattern, dec_type in _DECISION_PATTERNS: + for m in re.finditer(pattern, text, re.DOTALL): + body = m.group(2).strip() + if not body or len(body) < 10: + continue + title = body.split("\n")[0].strip()[:100] + if _is_noise(title) or _is_noise(body): + continue + decisions.append({ + "type": dec_type, + "title": title, + "description": body, + "category": _guess_category(body), + "source": source, + }) + + # Also look for ВАЖНО/GOTCHA/FIXME inline markers not caught above + for m in re.finditer(r"(?i)\*\*(ВАЖНО|GOTCHA|FIXME)\*\*[:\s]*(.*?)(?=\n|$)", text): + body = m.group(2).strip() + if not body or len(body) < 10: + continue + if _is_noise(body): + continue + decisions.append({ + "type": "gotcha", + "title": body[:100], + "description": body, + "category": _guess_category(body), + "source": source, + }) + + +# --------------------------------------------------------------------------- +# Formatting for CLI preview +# --------------------------------------------------------------------------- + +def format_preview( + project_id: str, + name: str, + path: str, + tech_stack: list[str], + modules: list[dict], + decisions: list[dict], + obsidian: dict | None = None, +) -> str: + """Format bootstrap results for user review.""" + lines = [ + f"Project: {project_id} — {name}", + f"Path: {path}", + "", + f"Tech stack: {', '.join(tech_stack) if tech_stack else '(not detected)'}", + "", + ] + + if modules: + lines.append(f"Modules ({len(modules)}):") + for m in modules: + lines.append(f" {m['name']} ({m['type']}) — {m['path']} ({m['file_count']} files)") + else: + lines.append("Modules: (none detected)") + 
lines.append("") + + if decisions: + lines.append(f"Decisions from CLAUDE.md ({len(decisions)}):") + for i, d in enumerate(decisions, 1): + lines.append(f" #{i} {d['type']}: {d['title']}") + else: + lines.append("Decisions from CLAUDE.md: (none found)") + + if obsidian: + lines.append("") + lines.append(f"Obsidian vault ({obsidian['files_scanned']} files scanned):") + if obsidian["tasks"]: + pending = [t for t in obsidian["tasks"] if not t["done"]] + done = [t for t in obsidian["tasks"] if t["done"]] + lines.append(f" Tasks: {len(pending)} pending, {len(done)} done") + for t in pending[:10]: + lines.append(f" [ ] {t['title']}") + if len(pending) > 10: + lines.append(f" ... and {len(pending) - 10} more") + for t in done[:5]: + lines.append(f" [x] {t['title']}") + if len(done) > 5: + lines.append(f" ... and {len(done) - 5} more done") + else: + lines.append(" Tasks: (none found)") + if obsidian["decisions"]: + lines.append(f" Decisions: {len(obsidian['decisions'])}") + for d in obsidian["decisions"][:5]: + lines.append(f" {d['type']}: {d['title']} (from {d['source']})") + if len(obsidian["decisions"]) > 5: + lines.append(f" ... 
and {len(obsidian['decisions']) - 5} more") + else: + lines.append(" Decisions: (none found)") + + return "\n".join(lines) + + +# --------------------------------------------------------------------------- +# Write to DB +# --------------------------------------------------------------------------- + +def save_to_db( + conn, + project_id: str, + name: str, + path: str, + tech_stack: list[str], + modules: list[dict], + decisions: list[dict], + obsidian: dict | None = None, +): + """Save all bootstrap data to kin.db via models.""" + from core import models + + # Create project + claude_md = Path(path).expanduser() / "CLAUDE.md" + models.create_project( + conn, project_id, name, path, + tech_stack=tech_stack, + claude_md_path=str(claude_md) if claude_md.exists() else None, + ) + + # Add modules + for m in modules: + models.add_module( + conn, project_id, m["name"], m["type"], m["path"], + description=f"{m['file_count']} files", + ) + + # Add decisions from CLAUDE.md + for d in decisions: + models.add_decision( + conn, project_id, d["type"], d["title"], d["description"], + category=d.get("category"), + ) + + # Add Obsidian decisions + if obsidian: + for d in obsidian.get("decisions", []): + models.add_decision( + conn, project_id, d["type"], d["title"], d["description"], + category=d.get("category"), + tags=[f"obsidian:{d['source']}"], + ) + + # Import Obsidian tasks + task_num = 1 + for t in obsidian.get("tasks", []): + task_id = f"{project_id.upper()}-OBS-{task_num:03d}" + status = "done" if t["done"] else "pending" + models.create_task( + conn, task_id, project_id, t["title"], + status=status, + brief={"source": f"obsidian:{t['source']}"}, + ) + task_num += 1 diff --git a/agents/prompts/followup.md b/agents/prompts/followup.md new file mode 100644 index 0000000..8d2f395 --- /dev/null +++ b/agents/prompts/followup.md @@ -0,0 +1,35 @@ +You are a Project Manager reviewing completed pipeline results. 
+ +Your job: analyze the output from all pipeline steps and create follow-up tasks. + +## Rules + +- Create one task per actionable item found in the pipeline output +- Group small related fixes into a single task when logical (e.g. "CORS + Helmet + CSP headers" = one task) +- Set priority based on severity: CRITICAL=1, HIGH=2, MEDIUM=4, LOW=6, INFO=8 +- Set type: "hotfix" for CRITICAL/HIGH security, "debug" for bugs, "feature" for improvements, "refactor" for cleanup +- Each task must have a clear, actionable title +- Include enough context in brief so the assigned specialist can start without re-reading the full audit +- Skip informational/already-done items — only create tasks for things that need action +- If no follow-ups are needed, return an empty array + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +[ + { + "title": "Добавить requireAuth на admin endpoints", + "type": "hotfix", + "priority": 2, + "brief": "3 admin-эндпоинта без auth: /api/admin/collect-hot-tours, /api/admin/refresh-hotel-details, /api/admin/hotel-stats. Добавить middleware requireAuth." + }, + { + "title": "Rate limiting на /api/auth/login", + "type": "feature", + "priority": 4, + "brief": "Эндпоинт login не имеет rate limiting. Добавить express-rate-limit: 5 попыток / 15 мин на IP." + } +] +``` diff --git a/agents/prompts/pm.md b/agents/prompts/pm.md new file mode 100644 index 0000000..9120f82 --- /dev/null +++ b/agents/prompts/pm.md @@ -0,0 +1,58 @@ +You are a Project Manager for the Kin multi-agent orchestrator. + +Your job: decompose a task into a pipeline of specialist steps. + +## Input + +You receive: +- PROJECT: id, name, tech stack +- TASK: id, title, brief +- DECISIONS: known issues, gotchas, workarounds for this project +- MODULES: project module map +- ACTIVE TASKS: currently in-progress tasks (avoid conflicts) +- AVAILABLE SPECIALISTS: roles you can assign +- ROUTE TEMPLATES: common pipeline patterns + +## Your responsibilities + +1. 
Analyze the task and determine what type of work is needed +2. Select the right specialists from the available pool +3. Build an ordered pipeline with dependencies +4. Include relevant context hints for each specialist +5. Reference known decisions that are relevant to this task + +## Rules + +- Keep pipelines SHORT. 2-4 steps for most tasks. +- Always end with a tester or reviewer step for quality. +- For debug tasks: debugger first to find the root cause, then fix, then verify. +- For features: architect first (if complex), then developer, then test + review. +- Don't assign specialists who aren't needed. +- If a task is blocked or unclear, say so — don't guess. + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "analysis": "Brief analysis of what needs to be done", + "pipeline": [ + { + "role": "debugger", + "model": "sonnet", + "brief": "What this specialist should do", + "module": "search", + "relevant_decisions": [1, 5, 12] + }, + { + "role": "tester", + "model": "sonnet", + "depends_on": "debugger", + "brief": "Write regression test for the fix" + } + ], + "estimated_steps": 2, + "route_type": "debug" +} +``` diff --git a/agents/prompts/security.md b/agents/prompts/security.md new file mode 100644 index 0000000..cd8af8d --- /dev/null +++ b/agents/prompts/security.md @@ -0,0 +1,73 @@ +You are a Security Engineer performing a security audit. + +## Scope + +Analyze the codebase for security vulnerabilities. Focus on: + +1. **Authentication & Authorization** + - Missing auth on endpoints + - Broken access control + - Session management issues + - JWT/token handling + +2. **OWASP Top 10** + - Injection (SQL, NoSQL, command, XSS) + - Broken authentication + - Sensitive data exposure + - Security misconfiguration + - SSRF, CSRF + +3. **Secrets & Credentials** + - Hardcoded secrets, API keys, passwords + - Secrets in git history + - Unencrypted sensitive data + - .env files exposed + +4. 
**Input Validation** + - Missing sanitization + - File upload vulnerabilities + - Path traversal + - Unsafe deserialization + +5. **Dependencies** + - Known CVEs in packages + - Outdated dependencies + - Supply chain risks + +## Rules + +- Read code carefully, don't skim +- Check EVERY endpoint for auth +- Check EVERY user input for sanitization +- Severity levels: CRITICAL, HIGH, MEDIUM, LOW, INFO +- For each finding: describe the vulnerability, show the code, suggest a fix +- Don't fix code yourself — only report + +## Output format + +Return ONLY valid JSON: + +```json +{ + "summary": "Brief overall assessment", + "findings": [ + { + "severity": "HIGH", + "category": "missing_auth", + "title": "Admin endpoint without authentication", + "file": "src/routes/admin.js", + "line": 42, + "description": "The /api/admin/users endpoint has no auth middleware", + "recommendation": "Add requireAuth middleware before the handler", + "owasp": "A01:2021 Broken Access Control" + } + ], + "stats": { + "files_reviewed": 15, + "critical": 0, + "high": 2, + "medium": 3, + "low": 1 + } +} +``` diff --git a/agents/runner.py b/agents/runner.py new file mode 100644 index 0000000..d5c6c1a --- /dev/null +++ b/agents/runner.py @@ -0,0 +1,321 @@ +""" +Kin agent runner — launches Claude Code as subprocess with role-specific context. +Each agent = separate process with isolated context. +""" + +import json +import sqlite3 +import subprocess +import time +from pathlib import Path +from typing import Any + +from core import models +from core.context_builder import build_context, format_prompt + + +def run_agent( + conn: sqlite3.Connection, + role: str, + task_id: str, + project_id: str, + model: str = "sonnet", + previous_output: str | None = None, + brief_override: str | None = None, + dry_run: bool = False, + allow_write: bool = False, +) -> dict: + """Run a single Claude Code agent as a subprocess. + + 1. Build context from DB + 2. Format prompt with role template + 3. 
Run: claude -p "{prompt}" --output-format json + 4. Log result to agent_logs + 5. Return {success, output, tokens_used, duration_seconds, cost_usd} + """ + # Build context + ctx = build_context(conn, task_id, role, project_id) + if previous_output: + ctx["previous_output"] = previous_output + if brief_override: + if ctx.get("task"): + ctx["task"]["brief"] = brief_override + + prompt = format_prompt(ctx, role) + + if dry_run: + return { + "success": True, + "output": None, + "prompt": prompt, + "role": role, + "model": model, + "dry_run": True, + } + + # Determine working directory + project = models.get_project(conn, project_id) + working_dir = None + if project and role in ("debugger", "frontend_dev", "backend_dev", "tester", "security"): + project_path = Path(project["path"]).expanduser() + if project_path.is_dir(): + working_dir = str(project_path) + + # Run claude subprocess + start = time.monotonic() + result = _run_claude(prompt, model=model, working_dir=working_dir, + allow_write=allow_write) + duration = int(time.monotonic() - start) + + # Parse output — ensure output_text is always a string for DB storage + raw_output = result.get("output", "") + if not isinstance(raw_output, str): + raw_output = json.dumps(raw_output, ensure_ascii=False) + output_text = raw_output + success = result["returncode"] == 0 + parsed_output = _try_parse_json(output_text) + + # Log FULL output to DB (no truncation) + models.log_agent_run( + conn, + project_id=project_id, + task_id=task_id, + agent_role=role, + action="execute", + input_summary=f"task={task_id}, model={model}", + output_summary=output_text or None, + tokens_used=result.get("tokens_used"), + model=model, + cost_usd=result.get("cost_usd"), + success=success, + error_message=result.get("error") if not success else None, + duration_seconds=duration, + ) + + return { + "success": success, + "output": parsed_output if parsed_output else output_text, + "raw_output": output_text, + "role": role, + "model": model, + 
"duration_seconds": duration, + "tokens_used": result.get("tokens_used"), + "cost_usd": result.get("cost_usd"), + } + + +def _run_claude( + prompt: str, + model: str = "sonnet", + working_dir: str | None = None, + allow_write: bool = False, +) -> dict: + """Execute claude CLI as subprocess. Returns dict with output, returncode, etc.""" + cmd = [ + "claude", + "-p", prompt, + "--output-format", "json", + "--model", model, + ] + if allow_write: + cmd.append("--dangerously-skip-permissions") + + try: + proc = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=600, # 10 min max + cwd=working_dir, + ) + except FileNotFoundError: + return { + "output": "", + "error": "claude CLI not found in PATH", + "returncode": 127, + } + except subprocess.TimeoutExpired: + return { + "output": "", + "error": "Agent timed out after 600s", + "returncode": 124, + } + + # Always preserve the full raw stdout + raw_stdout = proc.stdout or "" + result: dict[str, Any] = { + "output": raw_stdout, + "error": proc.stderr if proc.returncode != 0 else None, + "returncode": proc.returncode, + } + + # Parse JSON wrapper from claude --output-format json + # Extract metadata (tokens, cost) but keep output as the full content string + parsed = _try_parse_json(raw_stdout) + if isinstance(parsed, dict): + result["tokens_used"] = parsed.get("usage", {}).get("total_tokens") + result["cost_usd"] = parsed.get("cost_usd") + # Extract the agent's actual response, converting to string if needed + content = parsed.get("result") or parsed.get("content") + if content is not None: + result["output"] = content if isinstance(content, str) else json.dumps(content, ensure_ascii=False) + + return result + + +def _try_parse_json(text: str) -> Any: + """Try to parse JSON from text. 
Returns parsed obj or None.""" + text = text.strip() + if not text: + return None + + # Direct parse + try: + return json.loads(text) + except json.JSONDecodeError: + pass + + # Try to find JSON block in markdown code fences + import re + m = re.search(r"```(?:json)?\s*\n(.*?)\n```", text, re.DOTALL) + if m: + try: + return json.loads(m.group(1)) + except json.JSONDecodeError: + pass + + # Try to find first { ... } or [ ... ] + for start_char, end_char in [("{", "}"), ("[", "]")]: + start = text.find(start_char) + if start >= 0: + # Find matching close + depth = 0 + for i in range(start, len(text)): + if text[i] == start_char: + depth += 1 + elif text[i] == end_char: + depth -= 1 + if depth == 0: + try: + return json.loads(text[start:i + 1]) + except json.JSONDecodeError: + break + return None + + +# --------------------------------------------------------------------------- +# Pipeline executor +# --------------------------------------------------------------------------- + +def run_pipeline( + conn: sqlite3.Connection, + task_id: str, + steps: list[dict], + dry_run: bool = False, + allow_write: bool = False, +) -> dict: + """Execute a multi-step pipeline of agents. 
+ + steps = [ + {"role": "debugger", "model": "opus", "brief": "..."}, + {"role": "tester", "depends_on": "debugger", "brief": "..."}, + ] + + Returns {success, steps_completed, total_cost, total_tokens, total_duration, results} + """ + task = models.get_task(conn, task_id) + if not task: + return {"success": False, "error": f"Task '{task_id}' not found"} + + project_id = task["project_id"] + + # Determine route type from steps or task brief + route_type = "custom" + if task.get("brief") and isinstance(task["brief"], dict): + route_type = task["brief"].get("route_type", "custom") or "custom" + + # Create pipeline in DB + pipeline = None + if not dry_run: + pipeline = models.create_pipeline( + conn, task_id, project_id, route_type, steps, + ) + models.update_task(conn, task_id, status="in_progress") + + results = [] + total_cost = 0.0 + total_tokens = 0 + total_duration = 0 + previous_output = None + + for i, step in enumerate(steps): + role = step["role"] + model = step.get("model", "sonnet") + brief = step.get("brief") + + result = run_agent( + conn, role, task_id, project_id, + model=model, + previous_output=previous_output, + brief_override=brief, + dry_run=dry_run, + allow_write=allow_write, + ) + results.append(result) + + if dry_run: + continue + + # Accumulate stats + total_cost += result.get("cost_usd") or 0 + total_tokens += result.get("tokens_used") or 0 + total_duration += result.get("duration_seconds") or 0 + + if not result["success"]: + # Pipeline failed — stop and mark as failed + if pipeline: + models.update_pipeline( + conn, pipeline["id"], + status="failed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration, + ) + models.update_task(conn, task_id, status="blocked") + return { + "success": False, + "error": f"Step {i+1}/{len(steps)} ({role}) failed", + "steps_completed": i, + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, 
+ "pipeline_id": pipeline["id"] if pipeline else None, + } + + # Chain output to next step + previous_output = result.get("raw_output") or result.get("output") + if isinstance(previous_output, (dict, list)): + previous_output = json.dumps(previous_output, ensure_ascii=False) + + # Pipeline completed + if pipeline and not dry_run: + models.update_pipeline( + conn, pipeline["id"], + status="completed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration, + ) + models.update_task(conn, task_id, status="review") + + return { + "success": True, + "steps_completed": len(steps), + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, + "pipeline_id": pipeline["id"] if pipeline else None, + "dry_run": dry_run, + } diff --git a/agents/specialists.yaml b/agents/specialists.yaml new file mode 100644 index 0000000..4e9342c --- /dev/null +++ b/agents/specialists.yaml @@ -0,0 +1,104 @@ +# Kin specialist pool — roles available for pipeline construction. +# PM selects from this pool based on task type. 

# Each specialist entry: display name, model tier, allowed tools,
# permission level, optional working directory, and which decision types
# from the knowledge base are injected into its context.
specialists:
  pm:
    name: "Project Manager"
    model: sonnet
    tools: [Read, Grep, Glob]
    description: "Decomposes tasks, selects specialists, builds pipelines"
    permissions: read_only
    context_rules:
      decisions: all
      modules: all

  architect:
    name: "Software Architect"
    model: sonnet
    tools: [Read, Grep, Glob]
    description: "Designs solutions, reviews structure, writes specs"
    permissions: read_only
    context_rules:
      decisions: all
      modules: all

  debugger:
    name: "Debugger"
    model: sonnet
    tools: [Read, Grep, Glob, Bash]
    description: "Finds root causes, reads logs, traces execution"
    permissions: read_bash
    working_dir: project
    context_rules:
      decisions: [gotcha, workaround]

  frontend_dev:
    name: "Frontend Developer"
    model: sonnet
    tools: [Read, Write, Edit, Bash, Glob, Grep]
    description: "Implements UI: Vue, CSS, components, composables"
    permissions: full
    working_dir: project
    context_rules:
      decisions: [gotcha, workaround, convention]

  backend_dev:
    name: "Backend Developer"
    model: sonnet
    tools: [Read, Write, Edit, Bash, Glob, Grep]
    description: "Implements API, services, database, business logic"
    permissions: full
    working_dir: project
    context_rules:
      decisions: [gotcha, workaround, convention]

  tester:
    name: "Tester"
    model: sonnet
    tools: [Read, Write, Bash, Glob, Grep]
    description: "Writes and runs tests, verifies fixes"
    permissions: full
    working_dir: project
    context_rules:
      # Empty on purpose: the tester gets a minimal context (task spec only).
      decisions: []

  reviewer:
    name: "Code Reviewer"
    model: sonnet
    tools: [Read, Grep, Glob]
    description: "Reviews code for quality, conventions, bugs"
    permissions: read_only
    context_rules:
      decisions: [convention]

  security:
    name: "Security Engineer"
    model: sonnet
    tools: [Read, Grep, Glob, Bash]
    description: "OWASP audit, auth checks, secrets scan, vulnerability analysis"
    permissions: read_bash
    working_dir: project
    context_rules:
      # NOTE(review): every other role uses `decisions: [...]` here; this key
      # is `decisions_category` instead — confirm consumers expect this
      # variant (the visible context_builder does not read context_rules).
      decisions_category: security

# Route templates — 
PM uses these to build pipelines +routes: + debug: + steps: [debugger, tester, frontend_dev, tester] + description: "Find bug → verify → fix → verify fix" + + feature: + steps: [architect, frontend_dev, tester, reviewer] + description: "Design → implement → test → review" + + refactor: + steps: [architect, frontend_dev, tester, reviewer] + description: "Plan refactor → implement → test → review" + + hotfix: + steps: [debugger, frontend_dev, tester] + description: "Find → fix → verify (fast track)" + + security_audit: + steps: [security, architect] + description: "Audit → remediation plan" diff --git a/cli/__init__.py b/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cli/main.py b/cli/main.py new file mode 100644 index 0000000..8ed3281 --- /dev/null +++ b/cli/main.py @@ -0,0 +1,629 @@ +""" +Kin CLI — command-line interface for the multi-agent orchestrator. +Uses core.models for all data access, never raw SQL. +""" + +import json +import sys +from pathlib import Path + +import click + +# Ensure project root is on sys.path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from core.db import init_db +from core import models +from agents.bootstrap import ( + detect_tech_stack, detect_modules, extract_decisions_from_claude_md, + find_vault_root, scan_obsidian, format_preview, save_to_db, +) + +DEFAULT_DB = Path.home() / ".kin" / "kin.db" + + +def get_conn(db_path: Path = DEFAULT_DB): + db_path.parent.mkdir(parents=True, exist_ok=True) + return init_db(db_path) + + +def _parse_json(ctx, param, value): + """Click callback: parse a JSON string or return None.""" + if value is None: + return None + try: + return json.loads(value) + except json.JSONDecodeError: + raise click.BadParameter(f"Invalid JSON: {value}") + + +def _table(headers: list[str], rows: list[list[str]], min_width: int = 6): + """Render a simple aligned text table.""" + widths = [max(min_width, len(h)) for h in headers] + for row in rows: + for i, cell in enumerate(row): + if 
i < len(widths): + widths[i] = max(widths[i], len(str(cell))) + fmt = " ".join(f"{{:<{w}}}" for w in widths) + lines = [fmt.format(*headers), fmt.format(*("-" * w for w in widths))] + for row in rows: + lines.append(fmt.format(*[str(c) for c in row])) + return "\n".join(lines) + + +def _auto_task_id(conn, project_id: str) -> str: + """Generate next task ID like PROJ-001.""" + prefix = project_id.upper() + existing = models.list_tasks(conn, project_id=project_id) + max_num = 0 + for t in existing: + tid = t["id"] + if tid.startswith(prefix + "-"): + try: + num = int(tid.split("-", 1)[1]) + max_num = max(max_num, num) + except ValueError: + pass + return f"{prefix}-{max_num + 1:03d}" + + +# =========================================================================== +# Root group +# =========================================================================== + +@click.group() +@click.option("--db", type=click.Path(), default=None, envvar="KIN_DB", + help="Path to kin.db (default: ~/.kin/kin.db, or $KIN_DB)") +@click.pass_context +def cli(ctx, db): + """Kin — multi-agent project orchestrator.""" + ctx.ensure_object(dict) + db_path = Path(db) if db else DEFAULT_DB + ctx.obj["conn"] = get_conn(db_path) + + +# =========================================================================== +# project +# =========================================================================== + +@cli.group() +def project(): + """Manage projects.""" + + +@project.command("add") +@click.argument("id") +@click.argument("name") +@click.argument("path") +@click.option("--tech-stack", callback=_parse_json, default=None, help='JSON array, e.g. 
\'["vue3","nuxt"]\'') +@click.option("--status", default="active") +@click.option("--priority", type=int, default=5) +@click.option("--language", default="ru", help="Response language for agents (ru, en, etc.)") +@click.pass_context +def project_add(ctx, id, name, path, tech_stack, status, priority, language): + """Add a new project.""" + conn = ctx.obj["conn"] + p = models.create_project(conn, id, name, path, + tech_stack=tech_stack, status=status, priority=priority, + language=language) + click.echo(f"Created project: {p['id']} ({p['name']})") + + +@project.command("list") +@click.option("--status", default=None) +@click.pass_context +def project_list(ctx, status): + """List projects.""" + conn = ctx.obj["conn"] + projects = models.list_projects(conn, status=status) + if not projects: + click.echo("No projects found.") + return + rows = [[p["id"], p["name"], p["status"], str(p["priority"]), p["path"]] + for p in projects] + click.echo(_table(["ID", "Name", "Status", "Pri", "Path"], rows)) + + +@project.command("show") +@click.argument("id") +@click.pass_context +def project_show(ctx, id): + """Show project details.""" + conn = ctx.obj["conn"] + p = models.get_project(conn, id) + if not p: + click.echo(f"Project '{id}' not found.", err=True) + raise SystemExit(1) + click.echo(f"Project: {p['id']}") + click.echo(f" Name: {p['name']}") + click.echo(f" Path: {p['path']}") + click.echo(f" Status: {p['status']}") + click.echo(f" Priority: {p['priority']}") + if p.get("tech_stack"): + click.echo(f" Tech stack: {', '.join(p['tech_stack'])}") + if p.get("forgejo_repo"): + click.echo(f" Forgejo: {p['forgejo_repo']}") + click.echo(f" Created: {p['created_at']}") + + +# =========================================================================== +# task +# =========================================================================== + +@cli.group() +def task(): + """Manage tasks.""" + + +@task.command("add") +@click.argument("project_id") +@click.argument("title") 
+@click.option("--type", "route_type", type=click.Choice(["debug", "feature", "refactor", "hotfix"]), default=None) +@click.option("--priority", type=int, default=5) +@click.pass_context +def task_add(ctx, project_id, title, route_type, priority): + """Add a task to a project. ID is auto-generated (PROJ-001).""" + conn = ctx.obj["conn"] + p = models.get_project(conn, project_id) + if not p: + click.echo(f"Project '{project_id}' not found.", err=True) + raise SystemExit(1) + task_id = _auto_task_id(conn, project_id) + brief = {"route_type": route_type} if route_type else None + t = models.create_task(conn, task_id, project_id, title, + priority=priority, brief=brief) + click.echo(f"Created task: {t['id']} — {t['title']}") + + +@task.command("list") +@click.option("--project", "project_id", default=None) +@click.option("--status", default=None) +@click.pass_context +def task_list(ctx, project_id, status): + """List tasks.""" + conn = ctx.obj["conn"] + tasks = models.list_tasks(conn, project_id=project_id, status=status) + if not tasks: + click.echo("No tasks found.") + return + rows = [[t["id"], t["project_id"], t["title"][:40], t["status"], + str(t["priority"]), t.get("assigned_role") or "-"] + for t in tasks] + click.echo(_table(["ID", "Project", "Title", "Status", "Pri", "Role"], rows)) + + +@task.command("show") +@click.argument("id") +@click.pass_context +def task_show(ctx, id): + """Show task details.""" + conn = ctx.obj["conn"] + t = models.get_task(conn, id) + if not t: + click.echo(f"Task '{id}' not found.", err=True) + raise SystemExit(1) + click.echo(f"Task: {t['id']}") + click.echo(f" Project: {t['project_id']}") + click.echo(f" Title: {t['title']}") + click.echo(f" Status: {t['status']}") + click.echo(f" Priority: {t['priority']}") + if t.get("assigned_role"): + click.echo(f" Role: {t['assigned_role']}") + if t.get("parent_task_id"): + click.echo(f" Parent: {t['parent_task_id']}") + if t.get("brief"): + click.echo(f" Brief: {json.dumps(t['brief'], 
ensure_ascii=False)}") + if t.get("spec"): + click.echo(f" Spec: {json.dumps(t['spec'], ensure_ascii=False)}") + click.echo(f" Created: {t['created_at']}") + click.echo(f" Updated: {t['updated_at']}") + + +# =========================================================================== +# decision +# =========================================================================== + +@cli.group() +def decision(): + """Manage decisions and gotchas.""" + + +@decision.command("add") +@click.argument("project_id") +@click.argument("type", type=click.Choice(["decision", "gotcha", "workaround", "rejected_approach", "convention"])) +@click.argument("title") +@click.argument("description") +@click.option("--category", default=None) +@click.option("--tags", callback=_parse_json, default=None, help='JSON array, e.g. \'["ios","css"]\'') +@click.option("--task-id", default=None) +@click.pass_context +def decision_add(ctx, project_id, type, title, description, category, tags, task_id): + """Record a decision, gotcha, or convention.""" + conn = ctx.obj["conn"] + p = models.get_project(conn, project_id) + if not p: + click.echo(f"Project '{project_id}' not found.", err=True) + raise SystemExit(1) + d = models.add_decision(conn, project_id, type, title, description, + category=category, tags=tags, task_id=task_id) + click.echo(f"Added {d['type']}: #{d['id']} — {d['title']}") + + +@decision.command("list") +@click.argument("project_id") +@click.option("--category", default=None) +@click.option("--tag", multiple=True, help="Filter by tag (can repeat)") +@click.option("--type", "types", multiple=True, + type=click.Choice(["decision", "gotcha", "workaround", "rejected_approach", "convention"]), + help="Filter by type (can repeat)") +@click.pass_context +def decision_list(ctx, project_id, category, tag, types): + """List decisions for a project.""" + conn = ctx.obj["conn"] + tags_list = list(tag) if tag else None + types_list = list(types) if types else None + decisions = 
models.get_decisions(conn, project_id, category=category, + tags=tags_list, types=types_list) + if not decisions: + click.echo("No decisions found.") + return + rows = [[str(d["id"]), d["type"], d["category"] or "-", + d["title"][:50], d["created_at"][:10]] + for d in decisions] + click.echo(_table(["#", "Type", "Category", "Title", "Date"], rows)) + + +# =========================================================================== +# module +# =========================================================================== + +@cli.group() +def module(): + """Manage project modules.""" + + +@module.command("add") +@click.argument("project_id") +@click.argument("name") +@click.argument("type", type=click.Choice(["frontend", "backend", "shared", "infra"])) +@click.argument("path") +@click.option("--description", default=None) +@click.option("--owner-role", default=None) +@click.pass_context +def module_add(ctx, project_id, name, type, path, description, owner_role): + """Register a project module.""" + conn = ctx.obj["conn"] + p = models.get_project(conn, project_id) + if not p: + click.echo(f"Project '{project_id}' not found.", err=True) + raise SystemExit(1) + m = models.add_module(conn, project_id, name, type, path, + description=description, owner_role=owner_role) + click.echo(f"Added module: {m['name']} ({m['type']}) at {m['path']}") + + +@module.command("list") +@click.argument("project_id") +@click.pass_context +def module_list(ctx, project_id): + """List modules for a project.""" + conn = ctx.obj["conn"] + mods = models.get_modules(conn, project_id) + if not mods: + click.echo("No modules found.") + return + rows = [[m["name"], m["type"], m["path"], m.get("owner_role") or "-", + m.get("description") or ""] + for m in mods] + click.echo(_table(["Name", "Type", "Path", "Owner", "Description"], rows)) + + +# =========================================================================== +# status +# 
@cli.command("status")
@click.argument("project_id", required=False)
@click.pass_context
def status(ctx, project_id):
    """Project status overview. Without args — all projects. With id — detailed."""
    conn = ctx.obj["conn"]

    if project_id:
        # Detailed view for one project: header, per-status counts, task table.
        p = models.get_project(conn, project_id)
        if not p:
            click.echo(f"Project '{project_id}' not found.", err=True)
            raise SystemExit(1)
        tasks = models.list_tasks(conn, project_id=project_id)
        counts = {}
        for t in tasks:
            counts[t["status"]] = counts.get(t["status"], 0) + 1

        click.echo(f"Project: {p['id']} — {p['name']} [{p['status']}]")
        click.echo(f" Path: {p['path']}")
        if p.get("tech_stack"):
            click.echo(f" Stack: {', '.join(p['tech_stack'])}")
        click.echo(f" Tasks: {len(tasks)} total")
        # Fixed display order for statuses; zero-count rows are omitted.
        for s in ["pending", "in_progress", "review", "done", "blocked"]:
            if counts.get(s, 0) > 0:
                click.echo(f" {s}: {counts[s]}")
        if tasks:
            click.echo("")
            rows = [[t["id"], t["title"][:40], t["status"],
                     t.get("assigned_role") or "-"]
                    for t in tasks]
            click.echo(_table(["ID", "Title", "Status", "Role"], rows))
    else:
        # Summary view: one row per project with aggregate task counts.
        summary = models.get_project_summary(conn)
        if not summary:
            click.echo("No projects.")
            return
        rows = [[s["id"], s["name"][:25], s["status"], str(s["priority"]),
                 str(s["total_tasks"]), str(s["done_tasks"]),
                 str(s["active_tasks"]), str(s["blocked_tasks"])]
                for s in summary]
        click.echo(_table(
            ["ID", "Name", "Status", "Pri", "Total", "Done", "Active", "Blocked"],
            rows,
        ))


# ===========================================================================
# cost
# ===========================================================================

@cli.command("cost")
@click.option("--last", "period", default="7d", help="Period: 7d, 30d, etc.")
@click.pass_context
def cost(ctx, period):
    """Show cost summary by project.

    Accepts a period like "7d"/"30d" (day suffix) or a bare integer day
    count; exits with an error message on anything else.
    """
    # Parse period like "7d", "30d", or plain "30".
    # FIX: the two branches previously duplicated identical try/except
    # blocks with the same error message — collapsed into one.
    period = period.strip().lower()
    try:
        days = int(period[:-1]) if period.endswith("d") else int(period)
    except ValueError:
        click.echo(f"Invalid period: {period}. Use e.g. 7d, 30d.", err=True)
        raise SystemExit(1)

    conn = ctx.obj["conn"]
    costs = models.get_cost_summary(conn, days=days)
    if not costs:
        click.echo(f"No agent runs in the last {days} days.")
        return
    rows = [[c["project_id"], c["project_name"][:25], str(c["runs"]),
             f"{c['total_tokens']:,}", f"${c['total_cost_usd']:.4f}",
             f"{c['total_duration_seconds']}s"]
            for c in costs]
    click.echo(f"Cost summary (last {days} days):\n")
    click.echo(_table(
        ["Project", "Name", "Runs", "Tokens", "Cost", "Time"],
        rows,
    ))
    total = sum(c["total_cost_usd"] for c in costs)
    click.echo(f"\nTotal: ${total:.4f}")
Optionally generate follow-ups.""" + from core.followup import generate_followups, resolve_pending_action + + conn = ctx.obj["conn"] + task = models.get_task(conn, task_id) + if not task: + click.echo(f"Task '{task_id}' not found.", err=True) + raise SystemExit(1) + + models.update_task(conn, task_id, status="done") + click.echo(f"Approved: {task_id} → done") + + if decision_text: + models.add_decision( + conn, task["project_id"], "decision", decision_text, decision_text, + task_id=task_id, + ) + click.echo(f"Decision recorded.") + + if followup: + click.echo("Generating follow-up tasks...") + result = generate_followups(conn, task_id) + created = result["created"] + pending = result["pending_actions"] + + if created: + click.echo(f"Created {len(created)} follow-up tasks:") + for t in created: + click.echo(f" {t['id']}: {t['title']} (pri {t['priority']})") + + for action in pending: + click.echo(f"\nPermission issue: {action['description']}") + click.echo(" 1. Rerun with --dangerously-skip-permissions") + click.echo(" 2. Create task for manual fix") + click.echo(" 3. 
Skip") + choice_input = click.prompt("Choice", type=click.Choice(["1", "2", "3"]), default="2") + choice_map = {"1": "rerun", "2": "manual_task", "3": "skip"} + choice = choice_map[choice_input] + result = resolve_pending_action(conn, task_id, action, choice) + if choice == "rerun" and result: + rr = result.get("rerun_result", {}) + if rr.get("success"): + click.echo(" Re-run completed successfully.") + else: + click.echo(f" Re-run failed: {rr.get('error', 'unknown')}") + elif choice == "manual_task" and result: + click.echo(f" Created: {result['id']}: {result['title']}") + elif choice == "skip": + click.echo(" Skipped.") + + if not created and not pending: + click.echo("No follow-up tasks generated.") + + +# =========================================================================== +# run +# =========================================================================== + +@cli.command("run") +@click.argument("task_id") +@click.option("--dry-run", is_flag=True, help="Show pipeline plan without executing") +@click.pass_context +def run_task(ctx, task_id, dry_run): + """Run a task through the agent pipeline. + + PM decomposes the task into specialist steps, then the pipeline executes. + With --dry-run, shows the plan without running agents. 
+ """ + from agents.runner import run_agent, run_pipeline + + conn = ctx.obj["conn"] + task = models.get_task(conn, task_id) + if not task: + click.echo(f"Task '{task_id}' not found.", err=True) + raise SystemExit(1) + + project_id = task["project_id"] + click.echo(f"Task: {task['id']} — {task['title']}") + + # Step 1: PM decomposes + click.echo("Running PM to decompose task...") + pm_result = run_agent( + conn, "pm", task_id, project_id, + model="sonnet", dry_run=dry_run, + ) + + if dry_run: + click.echo("\n--- PM Prompt (dry-run) ---") + click.echo(pm_result.get("prompt", "")[:2000]) + click.echo("\n(Dry-run: PM would produce a pipeline JSON)") + return + + if not pm_result["success"]: + click.echo(f"PM failed: {pm_result.get('output', 'unknown error')}", err=True) + raise SystemExit(1) + + # Parse PM output for pipeline + output = pm_result.get("output") + if isinstance(output, str): + try: + output = json.loads(output) + except json.JSONDecodeError: + click.echo(f"PM returned non-JSON output:\n{output[:500]}", err=True) + raise SystemExit(1) + + if not isinstance(output, dict) or "pipeline" not in output: + click.echo(f"PM output missing 'pipeline' key:\n{json.dumps(output, indent=2)[:500]}", err=True) + raise SystemExit(1) + + pipeline_steps = output["pipeline"] + analysis = output.get("analysis", "") + + click.echo(f"\nAnalysis: {analysis}") + click.echo(f"Pipeline ({len(pipeline_steps)} steps):") + for i, step in enumerate(pipeline_steps, 1): + click.echo(f" {i}. 
{step['role']} ({step.get('model', 'sonnet')}): {step.get('brief', '')}") + + if not click.confirm("\nExecute pipeline?"): + click.echo("Aborted.") + return + + # Step 2: Execute pipeline + click.echo("\nExecuting pipeline...") + result = run_pipeline(conn, task_id, pipeline_steps) + + if result["success"]: + click.echo(f"\nPipeline completed: {result['steps_completed']} steps") + else: + click.echo(f"\nPipeline failed at step: {result.get('error', 'unknown')}", err=True) + + if result.get("total_cost_usd"): + click.echo(f"Cost: ${result['total_cost_usd']:.4f}") + if result.get("total_duration_seconds"): + click.echo(f"Duration: {result['total_duration_seconds']}s") + + +# =========================================================================== +# bootstrap +# =========================================================================== + +@cli.command("bootstrap") +@click.argument("path", type=click.Path(exists=True)) +@click.option("--id", "project_id", required=True, help="Short project ID (e.g. vdol)") +@click.option("--name", required=True, help="Project display name") +@click.option("--vault", "vault_path", type=click.Path(), default=None, + help="Obsidian vault path (auto-detected if omitted)") +@click.option("-y", "--yes", is_flag=True, help="Skip confirmation") +@click.pass_context +def bootstrap(ctx, path, project_id, name, vault_path, yes): + """Auto-detect project stack, modules, decisions and import into Kin.""" + conn = ctx.obj["conn"] + project_path = Path(path).expanduser().resolve() + + # Check if project already exists + existing = models.get_project(conn, project_id) + if existing: + click.echo(f"Project '{project_id}' already exists. 
Use 'kin project show {project_id}'.", err=True) + raise SystemExit(1) + + # Detect everything + click.echo(f"Scanning {project_path} ...") + tech_stack = detect_tech_stack(project_path) + modules = detect_modules(project_path) + decisions = extract_decisions_from_claude_md(project_path, project_id, name) + + # Obsidian + obsidian = None + vault_root = find_vault_root(Path(vault_path) if vault_path else None) + if vault_root: + dir_name = project_path.name + obsidian = scan_obsidian(vault_root, project_id, name, dir_name) + if not obsidian["tasks"] and not obsidian["decisions"]: + obsidian = None # Nothing found, don't clutter output + + # Preview + click.echo("") + click.echo(format_preview( + project_id, name, str(project_path), tech_stack, + modules, decisions, obsidian, + )) + click.echo("") + + if not yes: + if not click.confirm("Save to kin.db?"): + click.echo("Aborted.") + return + + save_to_db(conn, project_id, name, str(project_path), + tech_stack, modules, decisions, obsidian) + + # Summary + task_count = 0 + dec_count = len(decisions) + if obsidian: + task_count += len(obsidian.get("tasks", [])) + dec_count += len(obsidian.get("decisions", [])) + + click.echo(f"Saved: 1 project, {len(modules)} modules, " + f"{dec_count} decisions, {task_count} tasks.") + + +# =========================================================================== +# Entry point +# =========================================================================== + +if __name__ == "__main__": + cli() diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/context_builder.py b/core/context_builder.py new file mode 100644 index 0000000..fad1313 --- /dev/null +++ b/core/context_builder.py @@ -0,0 +1,222 @@ +""" +Kin context builder — assembles role-specific context from DB for agent prompts. +Each role gets only the information it needs, keeping prompts focused. 
+""" + +import json +import sqlite3 +from pathlib import Path + +from core import models + +PROMPTS_DIR = Path(__file__).parent.parent / "agents" / "prompts" +SPECIALISTS_PATH = Path(__file__).parent.parent / "agents" / "specialists.yaml" + + +def _load_specialists() -> dict: + """Load specialists.yaml (lazy, no pyyaml dependency — simple parser).""" + path = SPECIALISTS_PATH + if not path.exists(): + return {} + import yaml + return yaml.safe_load(path.read_text()) + + +def build_context( + conn: sqlite3.Connection, + task_id: str, + role: str, + project_id: str, +) -> dict: + """Build role-specific context from DB. + + Returns a dict with keys: task, project, and role-specific data. + """ + task = models.get_task(conn, task_id) + project = models.get_project(conn, project_id) + + ctx = { + "task": _slim_task(task) if task else None, + "project": _slim_project(project) if project else None, + "role": role, + } + + if role == "pm": + ctx["modules"] = models.get_modules(conn, project_id) + ctx["decisions"] = models.get_decisions(conn, project_id) + ctx["active_tasks"] = models.list_tasks(conn, project_id=project_id, status="in_progress") + try: + specs = _load_specialists() + ctx["available_specialists"] = list(specs.get("specialists", {}).keys()) + ctx["routes"] = specs.get("routes", {}) + except Exception: + ctx["available_specialists"] = [] + ctx["routes"] = {} + + elif role == "architect": + ctx["modules"] = models.get_modules(conn, project_id) + ctx["decisions"] = models.get_decisions(conn, project_id) + + elif role == "debugger": + ctx["decisions"] = models.get_decisions( + conn, project_id, types=["gotcha", "workaround"], + ) + ctx["module_hint"] = _extract_module_hint(task) + + elif role in ("frontend_dev", "backend_dev"): + ctx["decisions"] = models.get_decisions( + conn, project_id, types=["gotcha", "workaround", "convention"], + ) + + elif role == "reviewer": + ctx["decisions"] = models.get_decisions( + conn, project_id, types=["convention"], + ) + + elif 
role == "tester": + # Minimal context — just the task spec + pass + + elif role == "security": + ctx["decisions"] = models.get_decisions( + conn, project_id, category="security", + ) + + else: + # Unknown role — give decisions as fallback + ctx["decisions"] = models.get_decisions(conn, project_id, limit=20) + + return ctx + + +def _slim_task(task: dict) -> dict: + """Extract only relevant fields from a task for the prompt.""" + return { + "id": task["id"], + "title": task["title"], + "status": task["status"], + "priority": task["priority"], + "assigned_role": task.get("assigned_role"), + "brief": task.get("brief"), + "spec": task.get("spec"), + } + + +def _slim_project(project: dict) -> dict: + """Extract only relevant fields from a project.""" + return { + "id": project["id"], + "name": project["name"], + "path": project["path"], + "tech_stack": project.get("tech_stack"), + "language": project.get("language", "ru"), + } + + +def _extract_module_hint(task: dict | None) -> str | None: + """Try to extract module name from task brief.""" + if not task: + return None + brief = task.get("brief") + if isinstance(brief, dict): + return brief.get("module") + return None + + +def format_prompt(context: dict, role: str, prompt_template: str | None = None) -> str: + """Format a prompt by injecting context into a role template. + + If prompt_template is None, loads from agents/prompts/{role}.md. + """ + if prompt_template is None: + prompt_path = PROMPTS_DIR / f"{role}.md" + if prompt_path.exists(): + prompt_template = prompt_path.read_text() + else: + prompt_template = f"You are a {role}. Complete the task described below." 
+ + sections = [prompt_template, ""] + + # Project info + proj = context.get("project") + if proj: + sections.append(f"## Project: {proj['id']} — {proj['name']}") + if proj.get("tech_stack"): + sections.append(f"Tech stack: {', '.join(proj['tech_stack'])}") + sections.append(f"Path: {proj['path']}") + sections.append("") + + # Task info + task = context.get("task") + if task: + sections.append(f"## Task: {task['id']} — {task['title']}") + sections.append(f"Status: {task['status']}, Priority: {task['priority']}") + if task.get("brief"): + sections.append(f"Brief: {json.dumps(task['brief'], ensure_ascii=False)}") + if task.get("spec"): + sections.append(f"Spec: {json.dumps(task['spec'], ensure_ascii=False)}") + sections.append("") + + # Decisions + decisions = context.get("decisions") + if decisions: + sections.append(f"## Known decisions ({len(decisions)}):") + for d in decisions[:30]: # Cap at 30 to avoid token bloat + tags = f" [{', '.join(d['tags'])}]" if d.get("tags") else "" + sections.append(f"- #{d['id']} [{d['type']}] {d['title']}{tags}") + sections.append("") + + # Modules + modules = context.get("modules") + if modules: + sections.append(f"## Modules ({len(modules)}):") + for m in modules: + sections.append(f"- {m['name']} ({m['type']}) — {m['path']}") + sections.append("") + + # Active tasks (PM) + active = context.get("active_tasks") + if active: + sections.append(f"## Active tasks ({len(active)}):") + for t in active: + sections.append(f"- {t['id']}: {t['title']} [{t['status']}]") + sections.append("") + + # Available specialists (PM) + specialists = context.get("available_specialists") + if specialists: + sections.append(f"## Available specialists: {', '.join(specialists)}") + sections.append("") + + # Routes (PM) + routes = context.get("routes") + if routes: + sections.append("## Route templates:") + for name, route in routes.items(): + steps = " → ".join(route.get("steps", [])) + sections.append(f"- {name}: {steps}") + sections.append("") + + # 
Module hint (debugger) + hint = context.get("module_hint") + if hint: + sections.append(f"## Target module: {hint}") + sections.append("") + + # Previous step output (pipeline chaining) + prev = context.get("previous_output") + if prev: + sections.append("## Previous step output:") + sections.append(prev if isinstance(prev, str) else json.dumps(prev, ensure_ascii=False)) + sections.append("") + + # Language instruction — always last so it's fresh in context + proj = context.get("project") + language = proj.get("language", "ru") if proj else "ru" + _LANG_NAMES = {"ru": "Russian", "en": "English", "es": "Spanish", "de": "German", "fr": "French"} + lang_name = _LANG_NAMES.get(language, language) + sections.append(f"## Language") + sections.append(f"ALWAYS respond in {lang_name}. All summaries, analysis, comments, and recommendations must be in {lang_name}.") + sections.append("") + + return "\n".join(sections) diff --git a/core/db.py b/core/db.py new file mode 100644 index 0000000..284c66c --- /dev/null +++ b/core/db.py @@ -0,0 +1,192 @@ +""" +Kin — SQLite database schema and connection management. +All tables from DESIGN.md section 3.5 State Management. 
+""" + +import sqlite3 +from pathlib import Path + +DB_PATH = Path(__file__).parent.parent / "kin.db" + +SCHEMA = """ +-- Проекты (центральный реестр) +CREATE TABLE IF NOT EXISTS projects ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + path TEXT NOT NULL, + tech_stack JSON, + status TEXT DEFAULT 'active', + priority INTEGER DEFAULT 5, + pm_prompt TEXT, + claude_md_path TEXT, + forgejo_repo TEXT, + language TEXT DEFAULT 'ru', + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Задачи (привязаны к проекту) +CREATE TABLE IF NOT EXISTS tasks ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL REFERENCES projects(id), + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', + priority INTEGER DEFAULT 5, + assigned_role TEXT, + parent_task_id TEXT REFERENCES tasks(id), + brief JSON, + spec JSON, + review JSON, + test_result JSON, + security_result JSON, + forgejo_issue_id INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Решения и грабли (внешняя память PM-агента) +CREATE TABLE IF NOT EXISTS decisions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), + type TEXT NOT NULL, + category TEXT, + title TEXT NOT NULL, + description TEXT NOT NULL, + tags JSON, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Логи агентов (дебаг, обучение, cost tracking) +CREATE TABLE IF NOT EXISTS agent_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + task_id TEXT REFERENCES tasks(id), + agent_role TEXT NOT NULL, + session_id TEXT, + action TEXT NOT NULL, + input_summary TEXT, + output_summary TEXT, + tokens_used INTEGER, + model TEXT, + cost_usd REAL, + success BOOLEAN, + error_message TEXT, + duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Модули проекта (карта для PM) +CREATE TABLE IF NOT EXISTS modules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + 
project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, + type TEXT NOT NULL, + path TEXT NOT NULL, + description TEXT, + owner_role TEXT, + dependencies JSON, + UNIQUE(project_id, name) +); + +-- Pipelines (история запусков) +CREATE TABLE IF NOT EXISTS pipelines ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL REFERENCES tasks(id), + project_id TEXT NOT NULL REFERENCES projects(id), + route_type TEXT NOT NULL, + steps JSON NOT NULL, + status TEXT DEFAULT 'running', + total_cost_usd REAL, + total_tokens INTEGER, + total_duration_seconds INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + completed_at DATETIME +); + +-- Кросс-проектные зависимости +CREATE TABLE IF NOT EXISTS project_links ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + from_project TEXT NOT NULL REFERENCES projects(id), + to_project TEXT NOT NULL REFERENCES projects(id), + type TEXT NOT NULL, + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Тикеты от пользователей +CREATE TABLE IF NOT EXISTS support_tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + source TEXT NOT NULL, + client_id TEXT, + client_message TEXT NOT NULL, + classification TEXT, + guard_result TEXT, + guard_reason TEXT, + anamnesis JSON, + task_id TEXT REFERENCES tasks(id), + response TEXT, + response_approved BOOLEAN DEFAULT FALSE, + status TEXT DEFAULT 'new', + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + resolved_at DATETIME +); + +-- Настройки бота для каждого проекта +CREATE TABLE IF NOT EXISTS support_bot_config ( + project_id TEXT PRIMARY KEY REFERENCES projects(id), + telegram_bot_token TEXT, + welcome_message TEXT, + faq JSON, + auto_reply BOOLEAN DEFAULT FALSE, + require_approval BOOLEAN DEFAULT TRUE, + brand_voice TEXT, + forbidden_topics JSON, + escalation_keywords JSON +); + +-- Индексы +CREATE INDEX IF NOT EXISTS idx_tasks_project_status ON tasks(project_id, status); +CREATE INDEX IF NOT EXISTS 
idx_decisions_project ON decisions(project_id); +CREATE INDEX IF NOT EXISTS idx_decisions_tags ON decisions(tags); +CREATE INDEX IF NOT EXISTS idx_agent_logs_project ON agent_logs(project_id, created_at); +CREATE INDEX IF NOT EXISTS idx_agent_logs_cost ON agent_logs(project_id, cost_usd); +CREATE INDEX IF NOT EXISTS idx_tickets_project ON support_tickets(project_id, status); +CREATE INDEX IF NOT EXISTS idx_tickets_client ON support_tickets(client_id); +""" + + +def get_connection(db_path: Path = DB_PATH) -> sqlite3.Connection: + conn = sqlite3.connect(str(db_path)) + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("PRAGMA foreign_keys=ON") + conn.row_factory = sqlite3.Row + return conn + + +def _migrate(conn: sqlite3.Connection): + """Run migrations for existing databases.""" + # Check if language column exists on projects + cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()} + if "language" not in cols: + conn.execute("ALTER TABLE projects ADD COLUMN language TEXT DEFAULT 'ru'") + conn.commit() + + +def init_db(db_path: Path = DB_PATH) -> sqlite3.Connection: + conn = get_connection(db_path) + conn.executescript(SCHEMA) + conn.commit() + _migrate(conn) + return conn + + +if __name__ == "__main__": + conn = init_db() + tables = conn.execute( + "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" + ).fetchall() + print(f"Initialized {len(tables)} tables:") + for t in tables: + print(f" - {t['name']}") + conn.close() diff --git a/core/followup.py b/core/followup.py new file mode 100644 index 0000000..df19328 --- /dev/null +++ b/core/followup.py @@ -0,0 +1,232 @@ +""" +Kin follow-up generator — analyzes pipeline output and creates follow-up tasks. +Runs a PM agent to parse results and produce actionable task list. +Detects permission-blocked items and returns them as pending actions. 
+""" + +import json +import re +import sqlite3 + +from core import models +from core.context_builder import format_prompt, PROMPTS_DIR + +_PERMISSION_PATTERNS = [ + r"(?i)permission\s+denied", + r"(?i)ручное\s+применение", + r"(?i)не\s+получил[иа]?\s+разрешени[ея]", + r"(?i)cannot\s+write", + r"(?i)read[- ]?only", + r"(?i)нет\s+прав\s+на\s+запись", + r"(?i)manually\s+appl", + r"(?i)apply\s+manually", + r"(?i)требуется\s+ручн", +] + + +def _is_permission_blocked(item: dict) -> bool: + """Check if a follow-up item describes a permission/write failure.""" + text = f"{item.get('title', '')} {item.get('brief', '')}".lower() + return any(re.search(p, text) for p in _PERMISSION_PATTERNS) + + +def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str: + """Collect all pipeline step outputs for a task into a single string.""" + rows = conn.execute( + """SELECT agent_role, output_summary, success + FROM agent_logs WHERE task_id = ? ORDER BY created_at""", + (task_id,), + ).fetchall() + if not rows: + return "" + parts = [] + for r in rows: + status = "OK" if r["success"] else "FAILED" + parts.append(f"=== {r['agent_role']} [{status}] ===") + parts.append(r["output_summary"] or "(no output)") + parts.append("") + return "\n".join(parts) + + +def _next_task_id(conn: sqlite3.Connection, project_id: str) -> str: + """Generate the next sequential task ID for a project.""" + prefix = project_id.upper() + existing = models.list_tasks(conn, project_id=project_id) + max_num = 0 + for t in existing: + tid = t["id"] + if tid.startswith(prefix + "-"): + try: + num = int(tid.split("-", 1)[1]) + max_num = max(max_num, num) + except ValueError: + pass + return f"{prefix}-{max_num + 1:03d}" + + +def generate_followups( + conn: sqlite3.Connection, + task_id: str, + dry_run: bool = False, +) -> dict: + """Analyze pipeline output and create follow-up tasks. 
+ + Returns dict: + { + "created": [task, ...], # tasks created immediately + "pending_actions": [action, ...], # items needing user decision + } + + A pending_action looks like: + { + "type": "permission_fix", + "description": "...", + "original_item": {...}, # raw item from PM + "options": ["rerun", "manual_task", "skip"], + } + """ + task = models.get_task(conn, task_id) + if not task: + return {"created": [], "pending_actions": []} + + project_id = task["project_id"] + project = models.get_project(conn, project_id) + if not project: + return {"created": [], "pending_actions": []} + + pipeline_output = _collect_pipeline_output(conn, task_id) + if not pipeline_output: + return {"created": [], "pending_actions": []} + + # Build context for followup agent + language = project.get("language", "ru") + context = { + "project": { + "id": project["id"], + "name": project["name"], + "path": project["path"], + "tech_stack": project.get("tech_stack"), + "language": language, + }, + "task": { + "id": task["id"], + "title": task["title"], + "status": task["status"], + "priority": task["priority"], + "brief": task.get("brief"), + "spec": task.get("spec"), + }, + "previous_output": pipeline_output, + } + + prompt = format_prompt(context, "followup") + + if dry_run: + return {"created": [{"_dry_run": True, "_prompt": prompt}], "pending_actions": []} + + # Run followup agent + from agents.runner import _run_claude, _try_parse_json + + result = _run_claude(prompt, model="sonnet") + output = result.get("output", "") + + # Parse the task list from output + parsed = _try_parse_json(output) + if not isinstance(parsed, list): + if isinstance(parsed, dict): + parsed = parsed.get("tasks") or parsed.get("followups") or [] + else: + return {"created": [], "pending_actions": []} + + # Separate permission-blocked items from normal ones + created = [] + pending_actions = [] + + for item in parsed: + if not isinstance(item, dict) or "title" not in item: + continue + + if 
_is_permission_blocked(item): + pending_actions.append({ + "type": "permission_fix", + "description": item["title"], + "original_item": item, + "options": ["rerun", "manual_task", "skip"], + }) + else: + new_id = _next_task_id(conn, project_id) + brief_dict = {"source": f"followup:{task_id}"} + if item.get("type"): + brief_dict["route_type"] = item["type"] + if item.get("brief"): + brief_dict["description"] = item["brief"] + + t = models.create_task( + conn, new_id, project_id, + title=item["title"], + priority=item.get("priority", 5), + parent_task_id=task_id, + brief=brief_dict, + ) + created.append(t) + + # Log the followup generation + models.log_agent_run( + conn, project_id, "followup_pm", "generate_followups", + task_id=task_id, + output_summary=json.dumps({ + "created": [{"id": t["id"], "title": t["title"]} for t in created], + "pending": len(pending_actions), + }, ensure_ascii=False), + success=True, + ) + + return {"created": created, "pending_actions": pending_actions} + + +def resolve_pending_action( + conn: sqlite3.Connection, + task_id: str, + action: dict, + choice: str, +) -> dict | None: + """Resolve a single pending action. + + choice: "rerun" | "manual_task" | "skip" + Returns created task dict for "manual_task", None otherwise. 
+ """ + task = models.get_task(conn, task_id) + if not task: + return None + + project_id = task["project_id"] + item = action.get("original_item", {}) + + if choice == "skip": + return None + + if choice == "manual_task": + new_id = _next_task_id(conn, project_id) + brief_dict = {"source": f"followup:{task_id}"} + if item.get("type"): + brief_dict["route_type"] = item["type"] + if item.get("brief"): + brief_dict["description"] = item["brief"] + return models.create_task( + conn, new_id, project_id, + title=item.get("title", "Manual fix required"), + priority=item.get("priority", 5), + parent_task_id=task_id, + brief=brief_dict, + ) + + if choice == "rerun": + # Re-run pipeline for the parent task with allow_write + from agents.runner import run_pipeline + steps = [{"role": item.get("type", "frontend_dev"), + "brief": item.get("brief", item.get("title", "")), + "model": "sonnet"}] + result = run_pipeline(conn, task_id, steps, allow_write=True) + return {"rerun_result": result} + + return None diff --git a/core/models.py b/core/models.py new file mode 100644 index 0000000..d7bb075 --- /dev/null +++ b/core/models.py @@ -0,0 +1,447 @@ +""" +Kin — data access functions for all tables. +Pure functions: (conn, params) → dict | list[dict]. No ORM, no classes. 
+""" + +import json +import sqlite3 +from datetime import datetime +from typing import Any + + +def _row_to_dict(row: sqlite3.Row | None) -> dict | None: + """Convert sqlite3.Row to dict with JSON fields decoded.""" + if row is None: + return None + d = dict(row) + for key, val in d.items(): + if isinstance(val, str) and val.startswith(("[", "{")): + try: + d[key] = json.loads(val) + except (json.JSONDecodeError, ValueError): + pass + return d + + +def _rows_to_list(rows: list[sqlite3.Row]) -> list[dict]: + """Convert list of sqlite3.Row to list of dicts.""" + return [_row_to_dict(r) for r in rows] + + +def _json_encode(val: Any) -> Any: + """Encode lists/dicts to JSON strings for storage.""" + if isinstance(val, (list, dict)): + return json.dumps(val, ensure_ascii=False) + return val + + +# --------------------------------------------------------------------------- +# Projects +# --------------------------------------------------------------------------- + +def create_project( + conn: sqlite3.Connection, + id: str, + name: str, + path: str, + tech_stack: list | None = None, + status: str = "active", + priority: int = 5, + pm_prompt: str | None = None, + claude_md_path: str | None = None, + forgejo_repo: str | None = None, + language: str = "ru", +) -> dict: + """Create a new project and return it as dict.""" + conn.execute( + """INSERT INTO projects (id, name, path, tech_stack, status, priority, + pm_prompt, claude_md_path, forgejo_repo, language) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + (id, name, path, _json_encode(tech_stack), status, priority, + pm_prompt, claude_md_path, forgejo_repo, language), + ) + conn.commit() + return get_project(conn, id) + + +def get_project(conn: sqlite3.Connection, id: str) -> dict | None: + """Get project by id.""" + row = conn.execute("SELECT * FROM projects WHERE id = ?", (id,)).fetchone() + return _row_to_dict(row) + + +def list_projects(conn: sqlite3.Connection, status: str | None = None) -> list[dict]: + """List projects, 
optionally filtered by status.""" + if status: + rows = conn.execute( + "SELECT * FROM projects WHERE status = ? ORDER BY priority, name", + (status,), + ).fetchall() + else: + rows = conn.execute( + "SELECT * FROM projects ORDER BY priority, name" + ).fetchall() + return _rows_to_list(rows) + + +def update_project(conn: sqlite3.Connection, id: str, **fields) -> dict: + """Update project fields. Returns updated project.""" + if not fields: + return get_project(conn, id) + for key in ("tech_stack",): + if key in fields: + fields[key] = _json_encode(fields[key]) + sets = ", ".join(f"{k} = ?" for k in fields) + vals = list(fields.values()) + [id] + conn.execute(f"UPDATE projects SET {sets} WHERE id = ?", vals) + conn.commit() + return get_project(conn, id) + + +# --------------------------------------------------------------------------- +# Tasks +# --------------------------------------------------------------------------- + +def create_task( + conn: sqlite3.Connection, + id: str, + project_id: str, + title: str, + status: str = "pending", + priority: int = 5, + assigned_role: str | None = None, + parent_task_id: str | None = None, + brief: dict | None = None, + spec: dict | None = None, + forgejo_issue_id: int | None = None, +) -> dict: + """Create a task linked to a project.""" + conn.execute( + """INSERT INTO tasks (id, project_id, title, status, priority, + assigned_role, parent_task_id, brief, spec, forgejo_issue_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + (id, project_id, title, status, priority, assigned_role, + parent_task_id, _json_encode(brief), _json_encode(spec), + forgejo_issue_id), + ) + conn.commit() + return get_task(conn, id) + + +def get_task(conn: sqlite3.Connection, id: str) -> dict | None: + """Get task by id.""" + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (id,)).fetchone() + return _row_to_dict(row) + + +def list_tasks( + conn: sqlite3.Connection, + project_id: str | None = None, + status: str | None = None, +) -> list[dict]: 
+ """List tasks with optional project/status filters.""" + query = "SELECT * FROM tasks WHERE 1=1" + params: list = [] + if project_id: + query += " AND project_id = ?" + params.append(project_id) + if status: + query += " AND status = ?" + params.append(status) + query += " ORDER BY priority, created_at" + return _rows_to_list(conn.execute(query, params).fetchall()) + + +def update_task(conn: sqlite3.Connection, id: str, **fields) -> dict: + """Update task fields. Auto-sets updated_at.""" + if not fields: + return get_task(conn, id) + json_cols = ("brief", "spec", "review", "test_result", "security_result") + for key in json_cols: + if key in fields: + fields[key] = _json_encode(fields[key]) + fields["updated_at"] = datetime.now().isoformat() + sets = ", ".join(f"{k} = ?" for k in fields) + vals = list(fields.values()) + [id] + conn.execute(f"UPDATE tasks SET {sets} WHERE id = ?", vals) + conn.commit() + return get_task(conn, id) + + +# --------------------------------------------------------------------------- +# Decisions +# --------------------------------------------------------------------------- + +def add_decision( + conn: sqlite3.Connection, + project_id: str, + type: str, + title: str, + description: str, + category: str | None = None, + tags: list | None = None, + task_id: str | None = None, +) -> dict: + """Record a decision, gotcha, or convention for a project.""" + cur = conn.execute( + """INSERT INTO decisions (project_id, task_id, type, category, + title, description, tags) + VALUES (?, ?, ?, ?, ?, ?, ?)""", + (project_id, task_id, type, category, title, description, + _json_encode(tags)), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM decisions WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def get_decisions( + conn: sqlite3.Connection, + project_id: str, + category: str | None = None, + tags: list | None = None, + types: list | None = None, + limit: int | None = None, +) -> list[dict]: + """Query 
decisions for a project with optional filters. + + tags: matches if ANY tag is present (OR logic via json_each). + types: filter by decision type (decision, gotcha, workaround, etc). + """ + query = "SELECT DISTINCT d.* FROM decisions d WHERE d.project_id = ?" + params: list = [project_id] + if category: + query += " AND d.category = ?" + params.append(category) + if types: + placeholders = ", ".join("?" for _ in types) + query += f" AND d.type IN ({placeholders})" + params.extend(types) + if tags: + query += """ AND d.id IN ( + SELECT d2.id FROM decisions d2, json_each(d2.tags) AS t + WHERE t.value IN ({}) + )""".format(", ".join("?" for _ in tags)) + params.extend(tags) + query += " ORDER BY d.created_at DESC" + if limit: + query += " LIMIT ?" + params.append(limit) + return _rows_to_list(conn.execute(query, params).fetchall()) + + +# --------------------------------------------------------------------------- +# Modules +# --------------------------------------------------------------------------- + +def add_module( + conn: sqlite3.Connection, + project_id: str, + name: str, + type: str, + path: str, + description: str | None = None, + owner_role: str | None = None, + dependencies: list | None = None, +) -> dict: + """Register a project module.""" + cur = conn.execute( + """INSERT INTO modules (project_id, name, type, path, description, + owner_role, dependencies) + VALUES (?, ?, ?, ?, ?, ?, ?)""", + (project_id, name, type, path, description, owner_role, + _json_encode(dependencies)), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM modules WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def get_modules(conn: sqlite3.Connection, project_id: str) -> list[dict]: + """Get all modules for a project.""" + rows = conn.execute( + "SELECT * FROM modules WHERE project_id = ? 
ORDER BY type, name", + (project_id,), + ).fetchall() + return _rows_to_list(rows) + + +# --------------------------------------------------------------------------- +# Agent Logs +# --------------------------------------------------------------------------- + +def log_agent_run( + conn: sqlite3.Connection, + project_id: str, + agent_role: str, + action: str, + task_id: str | None = None, + session_id: str | None = None, + input_summary: str | None = None, + output_summary: str | None = None, + tokens_used: int | None = None, + model: str | None = None, + cost_usd: float | None = None, + success: bool = True, + error_message: str | None = None, + duration_seconds: int | None = None, +) -> dict: + """Log an agent execution run.""" + cur = conn.execute( + """INSERT INTO agent_logs (project_id, task_id, agent_role, session_id, + action, input_summary, output_summary, tokens_used, model, + cost_usd, success, error_message, duration_seconds) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + (project_id, task_id, agent_role, session_id, action, input_summary, + output_summary, tokens_used, model, cost_usd, success, + error_message, duration_seconds), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM agent_logs WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +# --------------------------------------------------------------------------- +# Pipelines +# --------------------------------------------------------------------------- + +def create_pipeline( + conn: sqlite3.Connection, + task_id: str, + project_id: str, + route_type: str, + steps: list | dict, +) -> dict: + """Create a new pipeline run.""" + cur = conn.execute( + """INSERT INTO pipelines (task_id, project_id, route_type, steps) + VALUES (?, ?, ?, ?)""", + (task_id, project_id, route_type, _json_encode(steps)), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM pipelines WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def 
update_pipeline( + conn: sqlite3.Connection, + id: int, + status: str | None = None, + total_cost_usd: float | None = None, + total_tokens: int | None = None, + total_duration_seconds: int | None = None, +) -> dict: + """Update pipeline status and stats.""" + fields: dict[str, Any] = {} + if status is not None: + fields["status"] = status + if status in ("completed", "failed", "cancelled"): + fields["completed_at"] = datetime.now().isoformat() + if total_cost_usd is not None: + fields["total_cost_usd"] = total_cost_usd + if total_tokens is not None: + fields["total_tokens"] = total_tokens + if total_duration_seconds is not None: + fields["total_duration_seconds"] = total_duration_seconds + if fields: + sets = ", ".join(f"{k} = ?" for k in fields) + vals = list(fields.values()) + [id] + conn.execute(f"UPDATE pipelines SET {sets} WHERE id = ?", vals) + conn.commit() + row = conn.execute( + "SELECT * FROM pipelines WHERE id = ?", (id,) + ).fetchone() + return _row_to_dict(row) + + +# --------------------------------------------------------------------------- +# Support +# --------------------------------------------------------------------------- + +def create_ticket( + conn: sqlite3.Connection, + project_id: str, + source: str, + client_message: str, + client_id: str | None = None, + classification: str | None = None, +) -> dict: + """Create a support ticket.""" + cur = conn.execute( + """INSERT INTO support_tickets (project_id, source, client_id, + client_message, classification) + VALUES (?, ?, ?, ?, ?)""", + (project_id, source, client_id, client_message, classification), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM support_tickets WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def list_tickets( + conn: sqlite3.Connection, + project_id: str | None = None, + status: str | None = None, +) -> list[dict]: + """List support tickets with optional filters.""" + query = "SELECT * FROM support_tickets WHERE 1=1" + params: list 
= [] + if project_id: + query += " AND project_id = ?" + params.append(project_id) + if status: + query += " AND status = ?" + params.append(status) + query += " ORDER BY created_at DESC" + return _rows_to_list(conn.execute(query, params).fetchall()) + + +# --------------------------------------------------------------------------- +# Statistics / Dashboard +# --------------------------------------------------------------------------- + +def get_project_summary(conn: sqlite3.Connection) -> list[dict]: + """Get all projects with task counts by status.""" + rows = conn.execute(""" + SELECT p.*, + COUNT(t.id) AS total_tasks, + SUM(CASE WHEN t.status = 'done' THEN 1 ELSE 0 END) AS done_tasks, + SUM(CASE WHEN t.status = 'in_progress' THEN 1 ELSE 0 END) AS active_tasks, + SUM(CASE WHEN t.status = 'blocked' THEN 1 ELSE 0 END) AS blocked_tasks, + SUM(CASE WHEN t.status = 'review' THEN 1 ELSE 0 END) AS review_tasks + FROM projects p + LEFT JOIN tasks t ON t.project_id = p.id + GROUP BY p.id + ORDER BY p.priority, p.name + """).fetchall() + return _rows_to_list(rows) + + +def get_cost_summary(conn: sqlite3.Connection, days: int = 7) -> list[dict]: + """Get cost summary by project for the last N days.""" + rows = conn.execute(""" + SELECT + p.id AS project_id, + p.name AS project_name, + COUNT(a.id) AS runs, + COALESCE(SUM(a.tokens_used), 0) AS total_tokens, + COALESCE(SUM(a.cost_usd), 0) AS total_cost_usd, + COALESCE(SUM(a.duration_seconds), 0) AS total_duration_seconds + FROM projects p + LEFT JOIN agent_logs a ON a.project_id = p.id + AND a.created_at >= datetime('now', ?) 
+ GROUP BY p.id + HAVING runs > 0 + ORDER BY total_cost_usd DESC + """, (f"-{days} days",)).fetchall() + return _rows_to_list(rows) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..ad9e1fa --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,16 @@ +[build-system] +requires = ["setuptools>=68.0"] +build-backend = "setuptools.backends._legacy:_Backend" + +[project] +name = "kin" +version = "0.1.0" +description = "Multi-agent project orchestrator" +requires-python = ">=3.11" +dependencies = ["click>=8.0", "fastapi>=0.110", "uvicorn>=0.29"] + +[project.scripts] +kin = "cli.main:cli" + +[tool.pytest.ini_options] +testpaths = ["tests"] diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000..8d7ea42 --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,185 @@ +"""Tests for web/api.py — new task endpoints (pipeline, approve, reject, full).""" + +import pytest +from pathlib import Path +from fastapi.testclient import TestClient + +# Patch DB_PATH before importing app +import web.api as api_module + +@pytest.fixture +def client(tmp_path): + db_path = tmp_path / "test.db" + api_module.DB_PATH = db_path + from web.api import app + c = TestClient(app) + # Seed data + c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"}) + c.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"}) + return c + + +def test_get_task(client): + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + assert r.json()["title"] == "Fix bug" + + +def test_get_task_not_found(client): + r = client.get("/api/tasks/NOPE") + assert r.status_code == 404 + + +def test_task_pipeline_empty(client): + r = client.get("/api/tasks/P1-001/pipeline") + assert r.status_code == 200 + assert r.json() == [] + + +def test_task_pipeline_with_logs(client): + # Insert agent logs directly + from core.db import init_db + from core import models + 
conn = init_db(api_module.DB_PATH) + models.log_agent_run(conn, "p1", "debugger", "execute", + task_id="P1-001", output_summary="Found bug", + tokens_used=1000, duration_seconds=5, success=True) + models.log_agent_run(conn, "p1", "tester", "execute", + task_id="P1-001", output_summary="Tests pass", + tokens_used=500, duration_seconds=3, success=True) + conn.close() + + r = client.get("/api/tasks/P1-001/pipeline") + assert r.status_code == 200 + steps = r.json() + assert len(steps) == 2 + assert steps[0]["agent_role"] == "debugger" + assert steps[0]["output_summary"] == "Found bug" + assert steps[1]["agent_role"] == "tester" + + +def test_task_full(client): + r = client.get("/api/tasks/P1-001/full") + assert r.status_code == 200 + data = r.json() + assert data["id"] == "P1-001" + assert "pipeline_steps" in data + assert "related_decisions" in data + + +def test_task_full_not_found(client): + r = client.get("/api/tasks/NOPE/full") + assert r.status_code == 404 + + +def test_approve_task(client): + # First set task to review + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="review") + conn.close() + + r = client.post("/api/tasks/P1-001/approve", json={}) + assert r.status_code == 200 + assert r.json()["status"] == "done" + + # Verify task is done + r = client.get("/api/tasks/P1-001") + assert r.json()["status"] == "done" + + +def test_approve_with_decision(client): + r = client.post("/api/tasks/P1-001/approve", json={ + "decision_title": "Use AbortController", + "decision_description": "Fix race condition with AbortController", + "decision_type": "decision", + }) + assert r.status_code == 200 + assert r.json()["decision"] is not None + assert r.json()["decision"]["title"] == "Use AbortController" + + +def test_approve_not_found(client): + r = client.post("/api/tasks/NOPE/approve", json={}) + assert r.status_code == 404 + + +def test_reject_task(client): + from core.db import 
init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="review") + conn.close() + + r = client.post("/api/tasks/P1-001/reject", json={ + "reason": "Didn't fix the root cause" + }) + assert r.status_code == 200 + assert r.json()["status"] == "pending" + + # Verify task is pending with review reason + r = client.get("/api/tasks/P1-001") + data = r.json() + assert data["status"] == "pending" + assert data["review"]["rejected"] == "Didn't fix the root cause" + + +def test_reject_not_found(client): + r = client.post("/api/tasks/NOPE/reject", json={"reason": "bad"}) + assert r.status_code == 404 + + +def test_task_pipeline_not_found(client): + r = client.get("/api/tasks/NOPE/pipeline") + assert r.status_code == 404 + + +def test_running_endpoint_no_pipeline(client): + r = client.get("/api/tasks/P1-001/running") + assert r.status_code == 200 + assert r.json()["running"] is False + + +def test_running_endpoint_with_pipeline(client): + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.create_pipeline(conn, "P1-001", "p1", "debug", + [{"role": "debugger"}]) + conn.close() + + r = client.get("/api/tasks/P1-001/running") + assert r.status_code == 200 + assert r.json()["running"] is True + + +def test_running_endpoint_not_found(client): + r = client.get("/api/tasks/NOPE/running") + assert r.status_code == 404 + + +def test_run_sets_in_progress(client): + """POST /run should set task to in_progress immediately.""" + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + r = client.get("/api/tasks/P1-001") + assert r.json()["status"] == "in_progress" + + +def test_run_not_found(client): + r = client.post("/api/tasks/NOPE/run") + assert r.status_code == 404 + + +def test_project_summary_includes_review(client): + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", 
status="review") + conn.close() + + r = client.get("/api/projects") + projects = r.json() + assert projects[0]["review_tasks"] == 1 diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py new file mode 100644 index 0000000..20dc5ea --- /dev/null +++ b/tests/test_bootstrap.py @@ -0,0 +1,427 @@ +"""Tests for agents/bootstrap.py — tech detection, modules, decisions, obsidian.""" + +import json +import pytest +from pathlib import Path +from click.testing import CliRunner + +from agents.bootstrap import ( + detect_tech_stack, detect_modules, extract_decisions_from_claude_md, + find_vault_root, scan_obsidian, format_preview, save_to_db, +) +from core.db import init_db +from core import models +from cli.main import cli + + +# --------------------------------------------------------------------------- +# Tech stack detection +# --------------------------------------------------------------------------- + +def test_detect_node_project(tmp_path): + (tmp_path / "package.json").write_text(json.dumps({ + "dependencies": {"vue": "^3.4", "pinia": "^2.0"}, + "devDependencies": {"typescript": "^5.0", "vite": "^5.0"}, + })) + (tmp_path / "tsconfig.json").write_text("{}") + (tmp_path / "nuxt.config.ts").write_text("export default {}") + + stack = detect_tech_stack(tmp_path) + assert "vue3" in stack + assert "typescript" in stack + assert "nuxt3" in stack + assert "pinia" in stack + assert "vite" in stack + + +def test_detect_python_project(tmp_path): + (tmp_path / "requirements.txt").write_text("fastapi==0.104\npydantic>=2.0\n") + (tmp_path / "pyproject.toml").write_text("[project]\nname='x'\n") + + stack = detect_tech_stack(tmp_path) + assert "python" in stack + assert "fastapi" in stack + assert "pydantic" in stack + + +def test_detect_go_project(tmp_path): + (tmp_path / "go.mod").write_text("module example.com/foo\nrequire gin-gonic v1.9\n") + + stack = detect_tech_stack(tmp_path) + assert "go" in stack + assert "gin" in stack + + +def test_detect_monorepo(tmp_path): + fe = 
tmp_path / "frontend" + fe.mkdir() + (fe / "package.json").write_text(json.dumps({ + "dependencies": {"vue": "^3.0"}, + })) + be = tmp_path / "backend" + be.mkdir() + (be / "requirements.txt").write_text("fastapi\n") + + stack = detect_tech_stack(tmp_path) + assert "vue3" in stack + assert "fastapi" in stack + + +def test_detect_deep_monorepo(tmp_path): + """Test that files nested 2-3 levels deep are found (like vdolipoperek).""" + fe = tmp_path / "frontend" / "src" + fe.mkdir(parents=True) + (tmp_path / "frontend" / "package.json").write_text(json.dumps({ + "dependencies": {"vue": "^3.4"}, + "devDependencies": {"vite": "^5.0", "tailwindcss": "^3.4"}, + })) + (tmp_path / "frontend" / "vite.config.js").write_text("export default {}") + (tmp_path / "frontend" / "tailwind.config.js").write_text("module.exports = {}") + + be = tmp_path / "backend-pg" / "src" + be.mkdir(parents=True) + (be / "index.js").write_text("const express = require('express');") + + stack = detect_tech_stack(tmp_path) + assert "vue3" in stack + assert "vite" in stack + assert "tailwind" in stack + + +def test_detect_empty_dir(tmp_path): + assert detect_tech_stack(tmp_path) == [] + + +# --------------------------------------------------------------------------- +# Module detection +# --------------------------------------------------------------------------- + +def test_detect_modules_vue(tmp_path): + src = tmp_path / "src" + (src / "components" / "search").mkdir(parents=True) + (src / "components" / "search" / "Search.vue").write_text("") + (src / "components" / "search" / "SearchFilter.vue").write_text("") + (src / "api" / "auth").mkdir(parents=True) + (src / "api" / "auth" / "login.ts").write_text("import express from 'express';\nconst router = express.Router();") + + modules = detect_modules(tmp_path) + names = {m["name"] for m in modules} + assert "components" in names or "search" in names + assert "api" in names or "auth" in names + + +def test_detect_modules_empty(tmp_path): + assert 
detect_modules(tmp_path) == [] + + +def test_detect_modules_backend_pg(tmp_path): + """Test detection in backend-pg/src/ pattern (like vdolipoperek).""" + src = tmp_path / "backend-pg" / "src" / "services" + src.mkdir(parents=True) + (src / "tourMapper.js").write_text("const express = require('express');") + (src / "dbService.js").write_text("module.exports = { query };") + + modules = detect_modules(tmp_path) + assert any(m["name"] == "services" for m in modules) + + +def test_detect_modules_monorepo(tmp_path): + """Full monorepo: frontend/src/ + backend-pg/src/.""" + # Frontend + fe_views = tmp_path / "frontend" / "src" / "views" + fe_views.mkdir(parents=True) + (fe_views / "Hotel.vue").write_text("") + fe_comp = tmp_path / "frontend" / "src" / "components" + fe_comp.mkdir(parents=True) + (fe_comp / "Search.vue").write_text("") + + # Backend + be_svc = tmp_path / "backend-pg" / "src" / "services" + be_svc.mkdir(parents=True) + (be_svc / "db.js").write_text("const express = require('express');") + be_routes = tmp_path / "backend-pg" / "src" / "routes" + be_routes.mkdir(parents=True) + (be_routes / "api.js").write_text("const router = require('express').Router();") + + modules = detect_modules(tmp_path) + names = {m["name"] for m in modules} + assert "views" in names + assert "components" in names + assert "services" in names + assert "routes" in names + # Check types + types = {m["name"]: m["type"] for m in modules} + assert types["views"] == "frontend" + assert types["components"] == "frontend" + + +# --------------------------------------------------------------------------- +# Decisions from CLAUDE.md +# --------------------------------------------------------------------------- + +def test_extract_decisions(tmp_path): + (tmp_path / "CLAUDE.md").write_text("""# Project + +## Rules +- Use WAL mode for SQLite + +ВАЖНО: docker-compose v1 глючит → только raw docker commands +WORKAROUND: position:fixed breaks on iOS Safari, use transform instead +GOTCHA: Sletat API 
бан при параллельных запросах +FIXME: race condition in useSearch composable + +## Known Issues +- Mobile bottom-sheet не работает в landscape mode +- CSS grid fallback для IE11 (но мы его не поддерживаем) +""") + + decisions = extract_decisions_from_claude_md(tmp_path, "myproj", "My Project") + assert len(decisions) >= 4 + + types = {d["type"] for d in decisions} + assert "gotcha" in types + assert "workaround" in types + + +def test_extract_decisions_no_claude_md(tmp_path): + assert extract_decisions_from_claude_md(tmp_path) == [] + + +def test_extract_decisions_filters_unrelated_sections(tmp_path): + """Sections about Jitsi, Nextcloud, Prosody should be skipped.""" + (tmp_path / "CLAUDE.md").write_text("""# vdolipoperek + +## Known Issues +1. **Hotel ID mismatch** — Sletat GetTours vs GetHotels разные ID +2. **db.js export** — module.exports = pool (НЕ { pool }) + +## Jitsi + Nextcloud интеграция (2026-03-04) + +ВАЖНО: JWT_APP_SECRET must be synced between Prosody and Nextcloud +GOTCHA: focus.meet.jitsi must be pinned in custom-config.js + +## Prosody config + +ВАЖНО: conf.d files принадлежат root → писать через docker exec + +## Git Sync (2026-03-03) + +ВАЖНО: Все среды синхронизированы на коммите 4ee5603 +""") + + decisions = extract_decisions_from_claude_md(tmp_path, "vdol", "vdolipoperek") + + titles = [d["title"] for d in decisions] + # Should have the real known issues + assert any("Hotel ID mismatch" in t for t in titles) + assert any("db.js export" in t for t in titles) + # Should NOT have Jitsi/Prosody/Nextcloud noise + assert not any("JWT_APP_SECRET" in t for t in titles) + assert not any("focus.meet.jitsi" in t for t in titles) + assert not any("conf.d files" in t for t in titles) + + +def test_extract_decisions_filters_noise(tmp_path): + """Commit hashes and shell commands should not be decisions.""" + (tmp_path / "CLAUDE.md").write_text("""# Project + +## Known Issues +1. 
**Real bug** — actual architectural issue that matters +- docker exec -it prosody bash +- ssh dev "cd /opt/project && git pull" +""") + + decisions = extract_decisions_from_claude_md(tmp_path) + titles = [d["title"] for d in decisions] + assert any("Real bug" in t for t in titles) + # Shell commands should be filtered + assert not any("docker exec" in t for t in titles) + assert not any("ssh dev" in t for t in titles) + + +# --------------------------------------------------------------------------- +# Obsidian vault +# --------------------------------------------------------------------------- + +def test_scan_obsidian(tmp_path): + # Create a mock vault + vault = tmp_path / "vault" + proj_dir = vault / "myproject" + proj_dir.mkdir(parents=True) + + (proj_dir / "kanban.md").write_text("""--- +kanban-plugin: board +--- + +## В работе +- [ ] Fix login page +- [ ] Add search filter +- [x] Setup CI/CD + +## Done +- [x] Initial deploy + +**ВАЖНО:** Не забыть обновить SSL сертификат +""") + + (proj_dir / "notes.md").write_text("""# Notes +GOTCHA: API rate limit is 10 req/s +- [ ] Write tests for auth module +""") + + result = scan_obsidian(vault, "myproject", "My Project", "myproject") + assert result["files_scanned"] == 2 + assert len(result["tasks"]) >= 4 # 3 pending + at least 1 done + assert len(result["decisions"]) >= 1 # At least the ВАЖНО one + + pending = [t for t in result["tasks"] if not t["done"]] + done = [t for t in result["tasks"] if t["done"]] + assert len(pending) >= 3 + assert len(done) >= 1 + + +def test_scan_obsidian_no_match(tmp_path): + vault = tmp_path / "vault" + vault.mkdir() + (vault / "other.md").write_text("# Unrelated note\nSomething else.") + + result = scan_obsidian(vault, "myproject", "My Project") + assert result["files_scanned"] == 0 + assert result["tasks"] == [] + + +def test_find_vault_root_explicit(tmp_path): + vault = tmp_path / "vault" + vault.mkdir() + assert find_vault_root(vault) == vault + + +def test_find_vault_root_none(): + 
assert find_vault_root(Path("/nonexistent/path")) is None + + +# --------------------------------------------------------------------------- +# Save to DB +# --------------------------------------------------------------------------- + +def test_save_to_db(tmp_path): + conn = init_db(":memory:") + + save_to_db( + conn, + project_id="test", + name="Test Project", + path=str(tmp_path), + tech_stack=["python", "fastapi"], + modules=[ + {"name": "api", "type": "backend", "path": "src/api/", "file_count": 5}, + {"name": "ui", "type": "frontend", "path": "src/ui/", "file_count": 8}, + ], + decisions=[ + {"type": "gotcha", "title": "Bug X", "description": "desc", + "category": "ui"}, + ], + obsidian={ + "tasks": [ + {"title": "Fix login", "done": False, "source": "kanban"}, + {"title": "Setup CI", "done": True, "source": "kanban"}, + ], + "decisions": [ + {"type": "gotcha", "title": "API limit", "description": "10 req/s", + "category": "api", "source": "notes"}, + ], + "files_scanned": 2, + }, + ) + + p = models.get_project(conn, "test") + assert p is not None + assert p["tech_stack"] == ["python", "fastapi"] + + mods = models.get_modules(conn, "test") + assert len(mods) == 2 + + decs = models.get_decisions(conn, "test") + assert len(decs) == 2 # 1 from CLAUDE.md + 1 from Obsidian + + tasks = models.list_tasks(conn, project_id="test") + assert len(tasks) == 2 # 2 from Obsidian + assert any(t["status"] == "done" for t in tasks) + assert any(t["status"] == "pending" for t in tasks) + + conn.close() + + +# --------------------------------------------------------------------------- +# format_preview +# --------------------------------------------------------------------------- + +def test_format_preview(): + text = format_preview( + "vdol", "ВДОЛЬ", "~/projects/vdol", + ["vue3", "typescript"], + [{"name": "search", "type": "frontend", "path": "src/search/", "file_count": 4}], + [{"type": "gotcha", "title": "Safari bug"}], + {"files_scanned": 3, "tasks": [ + {"title": "Fix X", 
"done": False, "source": "kb"}, + ], "decisions": []}, + ) + assert "vue3" in text + assert "search" in text + assert "Safari bug" in text + assert "Fix X" in text + + +# --------------------------------------------------------------------------- +# CLI integration +# --------------------------------------------------------------------------- + +def test_cli_bootstrap(tmp_path): + # Create a minimal project to bootstrap + proj = tmp_path / "myproj" + proj.mkdir() + (proj / "package.json").write_text(json.dumps({ + "dependencies": {"vue": "^3.0"}, + })) + src = proj / "src" / "components" + src.mkdir(parents=True) + (src / "App.vue").write_text("") + + db_path = tmp_path / "test.db" + runner = CliRunner() + result = runner.invoke(cli, [ + "--db", str(db_path), + "bootstrap", str(proj), + "--id", "myproj", + "--name", "My Project", + "--vault", str(tmp_path / "nonexistent_vault"), + "-y", + ]) + assert result.exit_code == 0 + assert "vue3" in result.output + assert "Saved:" in result.output + + # Verify in DB + conn = init_db(db_path) + p = models.get_project(conn, "myproj") + assert p is not None + assert "vue3" in p["tech_stack"] + conn.close() + + +def test_cli_bootstrap_already_exists(tmp_path): + proj = tmp_path / "myproj" + proj.mkdir() + + db_path = tmp_path / "test.db" + runner = CliRunner() + # Create project first + runner.invoke(cli, ["--db", str(db_path), "project", "add", "myproj", "X", str(proj)]) + # Try bootstrap — should fail + result = runner.invoke(cli, [ + "--db", str(db_path), + "bootstrap", str(proj), + "--id", "myproj", "--name", "X", "-y", + ]) + assert result.exit_code == 1 + assert "already exists" in result.output diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..b19551b --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,207 @@ +"""Tests for cli/main.py using click's CliRunner with in-memory-like temp DB.""" + +import json +import tempfile +from pathlib import Path + +import pytest +from click.testing 
import CliRunner + +from cli.main import cli + + +@pytest.fixture +def runner(tmp_path): + """CliRunner that uses a temp DB file.""" + db_path = tmp_path / "test.db" + return CliRunner(), ["--db", str(db_path)] + + +def invoke(runner_tuple, args): + runner, base = runner_tuple + result = runner.invoke(cli, base + args) + return result + + +# -- project -- + +def test_project_add_and_list(runner): + r = invoke(runner, ["project", "add", "vdol", "В долю поперёк", + "~/projects/vdolipoperek", "--tech-stack", '["vue3","nuxt"]']) + assert r.exit_code == 0 + assert "vdol" in r.output + + r = invoke(runner, ["project", "list"]) + assert r.exit_code == 0 + assert "vdol" in r.output + assert "В долю поперёк" in r.output + + +def test_project_list_empty(runner): + r = invoke(runner, ["project", "list"]) + assert r.exit_code == 0 + assert "No projects" in r.output + + +def test_project_list_filter_status(runner): + invoke(runner, ["project", "add", "a", "A", "/a", "--status", "active"]) + invoke(runner, ["project", "add", "b", "B", "/b", "--status", "paused"]) + + r = invoke(runner, ["project", "list", "--status", "active"]) + assert "a" in r.output + assert "b" not in r.output + + +def test_project_show(runner): + invoke(runner, ["project", "add", "vdol", "В долю", "/vdol", + "--tech-stack", '["vue3"]', "--priority", "2"]) + r = invoke(runner, ["project", "show", "vdol"]) + assert r.exit_code == 0 + assert "vue3" in r.output + assert "Priority: 2" in r.output + + +def test_project_show_not_found(runner): + r = invoke(runner, ["project", "show", "nope"]) + assert r.exit_code == 1 + assert "not found" in r.output + + +# -- task -- + +def test_task_add_and_list(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["task", "add", "p1", "Fix login bug", "--type", "debug"]) + assert r.exit_code == 0 + assert "P1-001" in r.output + + r = invoke(runner, ["task", "add", "p1", "Add search"]) + assert "P1-002" in r.output + + r = invoke(runner, ["task", 
"list"]) + assert "P1-001" in r.output + assert "P1-002" in r.output + + +def test_task_add_project_not_found(runner): + r = invoke(runner, ["task", "add", "nope", "Some task"]) + assert r.exit_code == 1 + assert "not found" in r.output + + +def test_task_list_filter(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["project", "add", "p2", "P2", "/p2"]) + invoke(runner, ["task", "add", "p1", "A"]) + invoke(runner, ["task", "add", "p2", "B"]) + + r = invoke(runner, ["task", "list", "--project", "p1"]) + assert "P1-001" in r.output + assert "P2-001" not in r.output + + +def test_task_show(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug", "--type", "debug"]) + r = invoke(runner, ["task", "show", "P1-001"]) + assert r.exit_code == 0 + assert "Fix bug" in r.output + + +def test_task_show_not_found(runner): + r = invoke(runner, ["task", "show", "X-999"]) + assert r.exit_code == 1 + assert "not found" in r.output + + +# -- decision -- + +def test_decision_add_and_list(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["decision", "add", "p1", "gotcha", + "Safari bug", "position:fixed breaks", + "--category", "ui", "--tags", '["ios","css"]']) + assert r.exit_code == 0 + assert "gotcha" in r.output + + r = invoke(runner, ["decision", "list", "p1"]) + assert "Safari bug" in r.output + + +def test_decision_list_filter(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["decision", "add", "p1", "gotcha", "A", "a", "--category", "ui"]) + invoke(runner, ["decision", "add", "p1", "decision", "B", "b", "--category", "arch"]) + + r = invoke(runner, ["decision", "list", "p1", "--type", "gotcha"]) + assert "A" in r.output + assert "B" not in r.output + + +# -- module -- + +def test_module_add_and_list(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["module", "add", "p1", "search", 
"frontend", "src/search/", + "--description", "Search UI"]) + assert r.exit_code == 0 + assert "search" in r.output + + r = invoke(runner, ["module", "list", "p1"]) + assert "search" in r.output + assert "Search UI" in r.output + + +# -- status -- + +def test_status_all(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "A"]) + invoke(runner, ["task", "add", "p1", "B"]) + + r = invoke(runner, ["status"]) + assert r.exit_code == 0 + assert "p1" in r.output + assert "2" in r.output # total tasks + + +def test_status_single_project(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "A"]) + + r = invoke(runner, ["status", "p1"]) + assert r.exit_code == 0 + assert "P1-001" in r.output + assert "pending" in r.output + + +def test_status_not_found(runner): + r = invoke(runner, ["status", "nope"]) + assert r.exit_code == 1 + assert "not found" in r.output + + +# -- cost -- + +def test_cost_empty(runner): + r = invoke(runner, ["cost"]) + assert r.exit_code == 0 + assert "No agent runs" in r.output + + +def test_cost_with_data(runner): + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + # Insert agent log directly via models (no CLI command for this) + from core.db import init_db + from core import models as m + # Re-open the same DB the runner uses + db_path = runner[1][1] + conn = init_db(Path(db_path)) + m.log_agent_run(conn, "p1", "dev", "implement", + cost_usd=0.10, tokens_used=5000) + conn.close() + + r = invoke(runner, ["cost", "--last", "7d"]) + assert r.exit_code == 0 + assert "p1" in r.output + assert "$0.1000" in r.output diff --git a/tests/test_context_builder.py b/tests/test_context_builder.py new file mode 100644 index 0000000..64bf732 --- /dev/null +++ b/tests/test_context_builder.py @@ -0,0 +1,163 @@ +"""Tests for core/context_builder.py — context assembly per role.""" + +import pytest +from core.db import init_db +from core import models +from 
core.context_builder import build_context, format_prompt + + +@pytest.fixture +def conn(): + c = init_db(":memory:") + # Seed project, modules, decisions, tasks + models.create_project(c, "vdol", "ВДОЛЬ и ПОПЕРЕК", "~/projects/vdolipoperek", + tech_stack=["vue3", "typescript", "nodejs"]) + models.add_module(c, "vdol", "search", "frontend", "src/search/") + models.add_module(c, "vdol", "api", "backend", "src/api/") + models.add_decision(c, "vdol", "gotcha", "Safari bug", + "position:fixed breaks", category="ui", tags=["ios"]) + models.add_decision(c, "vdol", "workaround", "API rate limit", + "10 req/s max", category="api") + models.add_decision(c, "vdol", "convention", "Use WAL mode", + "Always use WAL for SQLite", category="architecture") + models.add_decision(c, "vdol", "decision", "Auth required", + "All endpoints need auth", category="security") + models.create_task(c, "VDOL-001", "vdol", "Fix search filters", + brief={"module": "search", "route_type": "debug"}) + models.create_task(c, "VDOL-002", "vdol", "Add payments", + status="in_progress") + yield c + c.close() + + +class TestBuildContext: + def test_pm_gets_everything(self, conn): + ctx = build_context(conn, "VDOL-001", "pm", "vdol") + assert ctx["task"]["id"] == "VDOL-001" + assert ctx["project"]["id"] == "vdol" + assert len(ctx["modules"]) == 2 + assert len(ctx["decisions"]) == 4 # all decisions + assert len(ctx["active_tasks"]) == 1 # VDOL-002 in_progress + assert "pm" in ctx["available_specialists"] + + def test_architect_gets_all_decisions_and_modules(self, conn): + ctx = build_context(conn, "VDOL-001", "architect", "vdol") + assert len(ctx["modules"]) == 2 + assert len(ctx["decisions"]) == 4 + + def test_debugger_gets_only_gotcha_workaround(self, conn): + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + types = {d["type"] for d in ctx["decisions"]} + assert types <= {"gotcha", "workaround"} + assert "convention" not in types + assert "decision" not in types + assert ctx["module_hint"] == 
"search" + + def test_frontend_dev_gets_gotcha_workaround_convention(self, conn): + ctx = build_context(conn, "VDOL-001", "frontend_dev", "vdol") + types = {d["type"] for d in ctx["decisions"]} + assert "gotcha" in types + assert "workaround" in types + assert "convention" in types + assert "decision" not in types # plain decisions excluded + + def test_backend_dev_same_as_frontend(self, conn): + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + types = {d["type"] for d in ctx["decisions"]} + assert types == {"gotcha", "workaround", "convention"} + + def test_reviewer_gets_only_conventions(self, conn): + ctx = build_context(conn, "VDOL-001", "reviewer", "vdol") + types = {d["type"] for d in ctx["decisions"]} + assert types == {"convention"} + + def test_tester_gets_minimal_context(self, conn): + ctx = build_context(conn, "VDOL-001", "tester", "vdol") + assert ctx["task"] is not None + assert ctx["project"] is not None + assert "decisions" not in ctx + assert "modules" not in ctx + + def test_security_gets_security_decisions(self, conn): + ctx = build_context(conn, "VDOL-001", "security", "vdol") + categories = {d.get("category") for d in ctx["decisions"]} + assert categories == {"security"} + + def test_unknown_role_gets_fallback(self, conn): + ctx = build_context(conn, "VDOL-001", "unknown_role", "vdol") + assert "decisions" in ctx + assert len(ctx["decisions"]) > 0 + + +class TestFormatPrompt: + def test_format_with_template(self, conn): + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + prompt = format_prompt(ctx, "debugger", "You are a debugger. 
Find bugs.") + assert "You are a debugger" in prompt + assert "VDOL-001" in prompt + assert "Fix search filters" in prompt + assert "vdol" in prompt + assert "vue3" in prompt + + def test_format_includes_decisions(self, conn): + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + prompt = format_prompt(ctx, "debugger", "Debug this.") + assert "Safari bug" in prompt + assert "API rate limit" in prompt + # Convention should NOT be here (debugger doesn't get it) + assert "WAL mode" not in prompt + + def test_format_pm_includes_specialists(self, conn): + ctx = build_context(conn, "VDOL-001", "pm", "vdol") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "Available specialists" in prompt + assert "debugger" in prompt + assert "Active tasks" in prompt + assert "VDOL-002" in prompt + + def test_format_with_previous_output(self, conn): + ctx = build_context(conn, "VDOL-001", "tester", "vdol") + ctx["previous_output"] = "Found race condition in useSearch.ts" + prompt = format_prompt(ctx, "tester", "Write tests.") + assert "Previous step output" in prompt + assert "race condition" in prompt + + def test_format_loads_prompt_file(self, conn): + ctx = build_context(conn, "VDOL-001", "pm", "vdol") + prompt = format_prompt(ctx, "pm") # Should load from agents/prompts/pm.md + assert "decompose" in prompt.lower() or "pipeline" in prompt.lower() + + def test_format_missing_prompt_file(self, conn): + ctx = build_context(conn, "VDOL-001", "analyst", "vdol") + prompt = format_prompt(ctx, "analyst") # No analyst.md exists + assert "analyst" in prompt.lower() + + def test_format_includes_language_ru(self, conn): + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + prompt = format_prompt(ctx, "debugger", "Debug.") + assert "## Language" in prompt + assert "Russian" in prompt + assert "ALWAYS respond in Russian" in prompt + + def test_format_includes_language_en(self, conn): + # Update project language to en + conn.execute("UPDATE projects SET language='en' 
WHERE id='vdol'")
    conn.commit()
    ctx = build_context(conn, "VDOL-001", "debugger", "vdol")
    prompt = format_prompt(ctx, "debugger", "Debug.")
    assert "ALWAYS respond in English" in prompt


class TestLanguageInProject:
    # A project's `language` field must default correctly, be settable at
    # creation time, and flow through into the agent context.
    def test_project_has_language_default(self, conn):
        p = models.get_project(conn, "vdol")
        assert p["language"] == "ru"

    def test_create_project_with_language(self, conn):
        p = models.create_project(conn, "en-proj", "English Project", "/en",
                                  language="en")
        assert p["language"] == "en"

    def test_context_carries_language(self, conn):
        ctx = build_context(conn, "VDOL-001", "pm", "vdol")
        assert ctx["project"]["language"] == "ru"
diff --git a/tests/test_followup.py b/tests/test_followup.py
new file mode 100644
index 0000000..9bf13c7
--- /dev/null
+++ b/tests/test_followup.py
@@ -0,0 +1,224 @@
"""Tests for core/followup.py — follow-up task generation with permission handling."""

import json
import pytest
from unittest.mock import patch, MagicMock

from core.db import init_db
from core import models
from core.followup import (
    generate_followups, resolve_pending_action,
    _collect_pipeline_output, _next_task_id, _is_permission_blocked,
)


@pytest.fixture
def conn():
    # In-memory DB seeded with one project, one completed security-audit
    # task, and one successful agent run whose output holds two findings.
    c = init_db(":memory:")
    models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"], language="ru")
    models.create_task(c, "VDOL-001", "vdol", "Security audit",
                       status="done", brief={"route_type": "security_audit"})
    models.log_agent_run(c, "vdol", "security", "execute",
                         task_id="VDOL-001",
                         output_summary=json.dumps({
                             "summary": "8 уязвимостей найдено",
                             "findings": [
                                 {"severity": "HIGH", "title": "Admin endpoint без auth",
                                  "file": "index.js", "line": 42},
                                 {"severity": "MEDIUM", "title": "Нет rate limiting на login",
                                  "file": "auth.js", "line": 15},
                             ],
                         }, ensure_ascii=False),
                         success=True)
    yield c
    c.close()


class TestCollectPipelineOutput:
    def test_collects_all_steps(self, conn):
        output = _collect_pipeline_output(conn, "VDOL-001")
        assert "security" in output
        assert "Admin endpoint" in output

    def test_empty_for_no_logs(self, conn):
        assert _collect_pipeline_output(conn, "NONEXISTENT") == ""


class TestNextTaskId:
    def test_increments(self, conn):
        assert _next_task_id(conn, "vdol") == "VDOL-002"

    def test_handles_obs_ids(self, conn):
        # IDs of the form VDOL-OBS-001 must not confuse the numeric counter.
        models.create_task(conn, "VDOL-OBS-001", "vdol", "Obsidian task")
        assert _next_task_id(conn, "vdol") == "VDOL-002"


class TestIsPermissionBlocked:
    # Heuristic detection of "we could not write the file" follow-up items,
    # in both English and Russian phrasing.
    def test_detects_permission_denied(self):
        assert _is_permission_blocked({"title": "Fix X", "brief": "permission denied on write"})

    def test_detects_manual_application_ru(self):
        assert _is_permission_blocked({"title": "Ручное применение фикса для auth.js"})

    def test_detects_no_write_permission_ru(self):
        assert _is_permission_blocked({"title": "X", "brief": "не получили разрешение на запись"})

    def test_detects_read_only(self):
        assert _is_permission_blocked({"title": "Apply manually", "brief": "file is read-only"})

    def test_normal_item_not_blocked(self):
        assert not _is_permission_blocked({"title": "Fix admin auth", "brief": "Add requireAuth"})

    def test_empty_item(self):
        assert not _is_permission_blocked({})


class TestGenerateFollowups:
    @patch("agents.runner._run_claude")
    def test_creates_followup_tasks(self, mock_claude, conn):
        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Fix admin auth", "type": "hotfix", "priority": 2,
                 "brief": "Add requireAuth to admin endpoints"},
                {"title": "Add rate limiting", "type": "feature", "priority": 4,
                 "brief": "Rate limit login to 5/15min"},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 2
        assert len(result["pending_actions"]) == 0
        assert result["created"][0]["id"] == "VDOL-002"
        assert result["created"][0]["parent_task_id"] == "VDOL-001"

    @patch("agents.runner._run_claude")
    def test_separates_permission_items(self, mock_claude, conn):
        # Permission-blocked items become pending_actions, not tasks.
        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Fix admin auth", "type": "hotfix", "priority": 2,
                 "brief": "Add requireAuth"},
                {"title": "Ручное применение .dockerignore",
                 "type": "hotfix", "priority": 3,
                 "brief": "Не получили разрешение на запись в файл"},
                {"title": "Apply CSP headers manually",
                 "type": "feature", "priority": 4,
                 "brief": "Permission denied writing nginx.conf"},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 1  # Only "Fix admin auth"
        assert result["created"][0]["title"] == "Fix admin auth"
        assert len(result["pending_actions"]) == 2
        assert result["pending_actions"][0]["type"] == "permission_fix"
        assert "options" in result["pending_actions"][0]
        assert "rerun" in result["pending_actions"][0]["options"]

    @patch("agents.runner._run_claude")
    def test_handles_empty_response(self, mock_claude, conn):
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        result = generate_followups(conn, "VDOL-001")
        assert result["created"] == []
        assert result["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_handles_wrapped_response(self, mock_claude, conn):
        # The model sometimes wraps the list in {"tasks": [...]}.
        mock_claude.return_value = {
            "output": json.dumps({"tasks": [
                {"title": "Fix X", "priority": 3},
            ]}),
            "returncode": 0,
        }
        result = generate_followups(conn, "VDOL-001")
        assert len(result["created"]) == 1

    @patch("agents.runner._run_claude")
    def test_handles_invalid_json(self, mock_claude, conn):
        mock_claude.return_value = {"output": "not json", "returncode": 0}
        result = generate_followups(conn, "VDOL-001")
        assert result["created"] == []

    def test_no_logs_returns_empty(self, conn):
        models.create_task(conn, "VDOL-999", "vdol", "Empty task")
        result = generate_followups(conn, "VDOL-999")
        assert result["created"] == []

    def test_nonexistent_task(self, conn):
        result = generate_followups(conn, "NOPE")
        assert result["created"] == []

    def test_dry_run(self, conn):
        result = generate_followups(conn, "VDOL-001", dry_run=True)
        assert len(result["created"]) == 1
        assert result["created"][0]["_dry_run"] is True

    @patch("agents.runner._run_claude")
    def test_logs_generation(self, mock_claude, conn):
        mock_claude.return_value = {
            "output": json.dumps([{"title": "Fix A", "priority": 2}]),
            "returncode": 0,
        }
        generate_followups(conn, "VDOL-001")

        logs = conn.execute(
            "SELECT * FROM agent_logs WHERE agent_role='followup_pm'"
        ).fetchall()
        assert len(logs) == 1

    @patch("agents.runner._run_claude")
    def test_prompt_includes_language(self, mock_claude, conn):
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        generate_followups(conn, "VDOL-001")
        prompt = mock_claude.call_args[0][0]
        assert "Russian" in prompt


class TestResolvePendingAction:
    def test_skip_returns_none(self, conn):
        action = {"type": "permission_fix", "original_item": {"title": "X"}}
        assert resolve_pending_action(conn, "VDOL-001", action, "skip") is None

    def test_manual_task_creates_task(self, conn):
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix .dockerignore", "type": "hotfix",
                              "priority": 3, "brief": "Create .dockerignore"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result is not None
        assert result["title"] == "Fix .dockerignore"
        assert result["parent_task_id"] == "VDOL-001"
        assert result["priority"] == 3

    @patch("agents.runner._run_claude")
    def test_rerun_launches_pipeline(self, mock_claude, conn):
        mock_claude.return_value = {
            "output": json.dumps({"result": "applied fix"}),
            "returncode": 0,
        }
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix X", "type": "frontend_dev",
                              "brief": "Apply the fix"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "rerun")
        assert "rerun_result" in result

        # Verify --dangerously-skip-permissions was passed
        call_args = mock_claude.call_args
        cmd = call_args[0][0] if call_args[0] else None
        # NOTE(review): _run_claude is mocked above, so the actual CLI flags
        # cannot be inspected here; the permission override is only covered
        # indirectly (resolve "rerun" re-runs the pipeline with
        # allow_write=True). `call_args`/`cmd` are retained but unused.
        assert result["rerun_result"]["success"] is True

    def test_nonexistent_task(self, conn):
        action = {"type": "permission_fix", "original_item": {}}
        assert resolve_pending_action(conn, "NOPE", action, "skip") is None
diff --git a/tests/test_models.py b/tests/test_models.py
new file mode 100644
index 0000000..9982e39
--- /dev/null
+++ b/tests/test_models.py
@@ -0,0 +1,240 @@
"""Tests for core/models.py — all functions, in-memory SQLite."""

import pytest
from core.db import init_db
from core import models


@pytest.fixture
def conn():
    """Fresh in-memory DB for each test."""
    c = init_db(db_path=":memory:")
    yield c
    c.close()


# -- Projects --

def test_create_and_get_project(conn):
    p = models.create_project(conn, "vdol", "В долю поперёк", "~/projects/vdolipoperek",
                              tech_stack=["vue3", "nuxt"])
    assert p["id"] == "vdol"
    assert p["tech_stack"] == ["vue3", "nuxt"]
    assert p["status"] == "active"

    fetched = models.get_project(conn, "vdol")
    assert fetched["name"] == "В долю поперёк"


def test_get_project_not_found(conn):
    assert models.get_project(conn, "nope") is None


def test_list_projects_filter(conn):
    models.create_project(conn, "a", "A", "/a", status="active")
    models.create_project(conn, "b", "B", "/b", status="paused")
    models.create_project(conn, "c", "C", "/c", status="active")

    assert len(models.list_projects(conn)) == 3
    assert len(models.list_projects(conn, status="active")) == 2
    assert
len(models.list_projects(conn, status="paused")) == 1


def test_update_project(conn):
    models.create_project(conn, "x", "X", "/x", priority=5)
    updated = models.update_project(conn, "x", priority=1, status="maintenance")
    assert updated["priority"] == 1
    assert updated["status"] == "maintenance"


def test_update_project_tech_stack_json(conn):
    # tech_stack is stored as JSON; updates must round-trip to a list.
    models.create_project(conn, "x", "X", "/x", tech_stack=["python"])
    updated = models.update_project(conn, "x", tech_stack=["python", "fastapi"])
    assert updated["tech_stack"] == ["python", "fastapi"]


# -- Tasks --

def test_create_and_get_task(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    t = models.create_task(conn, "P1-001", "p1", "Fix bug",
                           brief={"summary": "broken login"})
    assert t["id"] == "P1-001"
    assert t["brief"] == {"summary": "broken login"}
    assert t["status"] == "pending"


def test_list_tasks_filters(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_project(conn, "p2", "P2", "/p2")
    models.create_task(conn, "P1-001", "p1", "Task A", status="pending")
    models.create_task(conn, "P1-002", "p1", "Task B", status="done")
    models.create_task(conn, "P2-001", "p2", "Task C", status="pending")

    assert len(models.list_tasks(conn)) == 3
    assert len(models.list_tasks(conn, project_id="p1")) == 2
    assert len(models.list_tasks(conn, status="pending")) == 2
    assert len(models.list_tasks(conn, project_id="p1", status="done")) == 1


def test_update_task(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    updated = models.update_task(conn, "P1-001", status="in_progress",
                                 spec={"steps": [1, 2, 3]})
    assert updated["status"] == "in_progress"
    assert updated["spec"] == {"steps": [1, 2, 3]}
    assert updated["updated_at"] is not None


def test_subtask(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Parent")
    child = models.create_task(conn, "P1-001a", "p1", "Child",
                               parent_task_id="P1-001")
    assert child["parent_task_id"] == "P1-001"


# -- Decisions --

def test_add_and_get_decisions(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    d = models.add_decision(conn, "p1", "gotcha", "iOS Safari bottom sheet",
                            "position:fixed breaks on iOS Safari",
                            category="ui", tags=["ios-safari", "css"])
    assert d["type"] == "gotcha"
    assert d["tags"] == ["ios-safari", "css"]

    results = models.get_decisions(conn, "p1")
    assert len(results) == 1


def test_decisions_filter_by_category(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "Use WAL", "perf",
                        category="architecture")
    models.add_decision(conn, "p1", "gotcha", "Safari bug", "css",
                        category="ui")
    assert len(models.get_decisions(conn, "p1", category="ui")) == 1


def test_decisions_filter_by_tags(conn):
    # Tag filtering is OR-semantics: any matching tag selects the decision.
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "gotcha", "Bug A", "desc",
                        tags=["safari", "css"])
    models.add_decision(conn, "p1", "gotcha", "Bug B", "desc",
                        tags=["chrome", "js"])
    models.add_decision(conn, "p1", "gotcha", "Bug C", "desc",
                        tags=["safari", "js"])

    assert len(models.get_decisions(conn, "p1", tags=["safari"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["js"])) == 2
    assert len(models.get_decisions(conn, "p1", tags=["css"])) == 1


def test_decisions_filter_by_types(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.add_decision(conn, "p1", "decision", "A", "a")
    models.add_decision(conn, "p1", "gotcha", "B", "b")
    models.add_decision(conn, "p1", "workaround", "C", "c")

    assert len(models.get_decisions(conn, "p1", types=["gotcha", "workaround"])) == 2


def test_decisions_limit(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    for i in range(10):
        models.add_decision(conn, "p1", "decision", f"D{i}", f"desc{i}")
    assert len(models.get_decisions(conn, "p1", limit=3)) == 3


# -- Modules --

def test_add_and_get_modules(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    m = models.add_module(conn, "p1", "search", "frontend", "src/search/",
                          description="Search UI", dependencies=["auth"])
    assert m["name"] == "search"
    assert m["dependencies"] == ["auth"]

    mods = models.get_modules(conn, "p1")
    assert len(mods) == 1


# -- Agent Logs --

def test_log_agent_run(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    log = models.log_agent_run(conn, "p1", "developer", "implement",
                               tokens_used=5000, model="sonnet",
                               cost_usd=0.015, duration_seconds=45)
    assert log["agent_role"] == "developer"
    assert log["cost_usd"] == 0.015
    assert log["success"] == 1  # SQLite boolean


# -- Pipelines --

def test_create_and_update_pipeline(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "Task")
    pipe = models.create_pipeline(conn, "P1-001", "p1", "feature",
                                  [{"step": "architect"}, {"step": "dev"}])
    assert pipe["status"] == "running"
    assert pipe["steps"] == [{"step": "architect"}, {"step": "dev"}]

    updated = models.update_pipeline(conn, pipe["id"], status="completed",
                                     total_cost_usd=0.05, total_tokens=10000)
    assert updated["status"] == "completed"
    assert updated["completed_at"] is not None


# -- Support --

def test_create_and_list_tickets(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    t = models.create_ticket(conn, "p1", "telegram_bot", "Не работает поиск",
                             client_id="tg:12345", classification="bug")
    assert t["source"] == "telegram_bot"
    assert t["status"] == "new"

    tickets = models.list_tickets(conn, project_id="p1")
    assert len(tickets) == 1

    assert len(models.list_tickets(conn, status="resolved")) == 0


# -- Statistics --

def test_project_summary(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.create_task(conn, "P1-001", "p1", "A", status="done")
    models.create_task(conn, "P1-002", "p1", "B", status="in_progress")
    models.create_task(conn, "P1-003", "p1", "C", status="blocked")

    summary = models.get_project_summary(conn)
    assert len(summary) == 1
    s = summary[0]
    assert s["total_tasks"] == 3
    assert s["done_tasks"] == 1
    assert s["active_tasks"] == 1
    assert s["blocked_tasks"] == 1


def test_cost_summary(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    models.log_agent_run(conn, "p1", "dev", "implement",
                         cost_usd=0.10, tokens_used=5000)
    models.log_agent_run(conn, "p1", "reviewer", "review",
                         cost_usd=0.05, tokens_used=2000)

    costs = models.get_cost_summary(conn, days=1)
    assert len(costs) == 1
    assert costs[0]["total_cost_usd"] == pytest.approx(0.15)
    assert costs[0]["total_tokens"] == 7000
    assert costs[0]["runs"] == 2


def test_cost_summary_empty(conn):
    models.create_project(conn, "p1", "P1", "/p1")
    assert models.get_cost_summary(conn, days=7) == []
diff --git a/tests/test_runner.py b/tests/test_runner.py
new file mode 100644
index 0000000..f1dd4cd
--- /dev/null
+++ b/tests/test_runner.py
@@ -0,0 +1,276 @@
"""Tests for agents/runner.py — agent execution with mocked claude CLI."""

import json
import pytest
from unittest.mock import patch, MagicMock
from core.db import init_db
from core import models
from agents.runner import run_agent, run_pipeline, _try_parse_json


@pytest.fixture
def conn():
    c = init_db(":memory:")
    models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"])
    models.create_task(c, "VDOL-001", "vdol", "Fix bug",
                       brief={"route_type": "debug"})
    yield c
    c.close()


def _mock_claude_success(output_data):
    """Create a mock subprocess result with successful claude output."""
    mock = MagicMock()
    mock.stdout = json.dumps(output_data) if isinstance(output_data, dict) else output_data
    mock.stderr = ""
    mock.returncode = 0
    return mock


def _mock_claude_failure(error_msg):
    # Mock subprocess result for a failed claude invocation.
    mock = MagicMock()
    mock.stdout = ""
    mock.stderr =
error_msg
    mock.returncode = 1
    return mock


# ---------------------------------------------------------------------------
# run_agent
# ---------------------------------------------------------------------------

class TestRunAgent:
    @patch("agents.runner.subprocess.run")
    def test_successful_agent_run(self, mock_run, conn):
        mock_run.return_value = _mock_claude_success({
            "result": "Found race condition in useSearch.ts",
            "usage": {"total_tokens": 5000},
            "cost_usd": 0.015,
        })

        result = run_agent(conn, "debugger", "VDOL-001", "vdol")

        assert result["success"] is True
        assert result["role"] == "debugger"
        assert result["model"] == "sonnet"
        assert result["duration_seconds"] >= 0

        # Verify claude was called with right args
        call_args = mock_run.call_args
        cmd = call_args[0][0]
        assert "claude" in cmd[0]
        assert "-p" in cmd
        assert "--output-format" in cmd
        assert "json" in cmd

    @patch("agents.runner.subprocess.run")
    def test_failed_agent_run(self, mock_run, conn):
        mock_run.return_value = _mock_claude_failure("API error")

        result = run_agent(conn, "debugger", "VDOL-001", "vdol")

        assert result["success"] is False

        # Should be logged in agent_logs
        logs = conn.execute("SELECT * FROM agent_logs WHERE task_id='VDOL-001'").fetchall()
        assert len(logs) == 1
        assert logs[0]["success"] == 0

    def test_dry_run_returns_prompt(self, conn):
        result = run_agent(conn, "debugger", "VDOL-001", "vdol", dry_run=True)

        assert result["dry_run"] is True
        assert result["prompt"] is not None
        assert "VDOL-001" in result["prompt"]
        assert result["output"] is None

    @patch("agents.runner.subprocess.run")
    def test_agent_logs_to_db(self, mock_run, conn):
        mock_run.return_value = _mock_claude_success({"result": "ok"})

        run_agent(conn, "tester", "VDOL-001", "vdol")

        logs = conn.execute("SELECT * FROM agent_logs WHERE agent_role='tester'").fetchall()
        assert len(logs) == 1
        assert logs[0]["project_id"] == "vdol"

    @patch("agents.runner.subprocess.run")
    def test_full_output_saved_to_db(self, mock_run, conn):
        """Bug fix: output_summary must contain the FULL output, not truncated."""
        long_json = json.dumps({
            "result": json.dumps({
                "summary": "Security audit complete",
                "findings": [{"title": f"Finding {i}", "severity": "HIGH"} for i in range(50)],
            }),
        })
        mock = MagicMock()
        mock.stdout = long_json
        mock.stderr = ""
        mock.returncode = 0
        mock_run.return_value = mock

        run_agent(conn, "security", "VDOL-001", "vdol")

        logs = conn.execute("SELECT output_summary FROM agent_logs WHERE agent_role='security'").fetchall()
        assert len(logs) == 1
        output = logs[0]["output_summary"]
        assert output is not None
        assert len(output) > 1000  # Must not be truncated
        # Should contain all 50 findings
        assert "Finding 49" in output

    @patch("agents.runner.subprocess.run")
    def test_dict_output_saved_as_json_string(self, mock_run, conn):
        """When claude returns structured JSON, it must be saved as string."""
        mock_run.return_value = _mock_claude_success({
            "result": {"status": "ok", "files": ["a.py", "b.py"]},
        })

        result = run_agent(conn, "debugger", "VDOL-001", "vdol")

        # output should be a string (JSON serialized), not a dict
        assert isinstance(result["raw_output"], str)

        logs = conn.execute("SELECT output_summary FROM agent_logs WHERE agent_role='debugger'").fetchall()
        saved = logs[0]["output_summary"]
        assert isinstance(saved, str)
        assert "a.py" in saved

    @patch("agents.runner.subprocess.run")
    def test_previous_output_passed(self, mock_run, conn):
        mock_run.return_value = _mock_claude_success({"result": "tests pass"})

        run_agent(conn, "tester", "VDOL-001", "vdol",
                  previous_output="Found bug in line 42")

        # NOTE(review): index [0][0][2] assumes the prompt is always the
        # third element of the claude argv (right after "-p") — brittle if
        # the CLI invocation ever changes; confirm against agents/runner.py.
        call_args = mock_run.call_args
        prompt = call_args[0][0][2]  # -p argument
        assert "line 42" in prompt


# ---------------------------------------------------------------------------
# run_pipeline
# ---------------------------------------------------------------------------

class TestRunPipeline:
    @patch("agents.runner.subprocess.run")
    def test_successful_pipeline(self, mock_run, conn):
        mock_run.return_value = _mock_claude_success({"result": "done"})

        steps = [
            {"role": "debugger", "brief": "find bug"},
            {"role": "tester", "depends_on": "debugger", "brief": "verify"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        assert result["steps_completed"] == 2
        assert len(result["results"]) == 2

        # Pipeline created in DB
        pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone()
        assert pipe is not None
        assert pipe["status"] == "completed"

        # Task updated to review
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review"

    @patch("agents.runner.subprocess.run")
    def test_pipeline_fails_on_step(self, mock_run, conn):
        # First step succeeds, second fails
        mock_run.side_effect = [
            _mock_claude_success({"result": "found bug"}),
            _mock_claude_failure("compilation error"),
        ]

        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "frontend_dev", "brief": "fix"},
            {"role": "tester", "brief": "test"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is False
        assert result["steps_completed"] == 1  # Only debugger completed
        assert "frontend_dev" in result["error"]

        # Pipeline marked as failed
        pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone()
        assert pipe["status"] == "failed"

        # Task marked as blocked
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "blocked"

    def test_pipeline_dry_run(self, conn):
        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "tester", "brief": "verify"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps, dry_run=True)

        assert result["dry_run"] is True
        assert result["success"] is True
        assert result["steps_completed"] == 2

        # No pipeline created in DB
        pipes = conn.execute("SELECT * FROM pipelines").fetchall()
        assert len(pipes) == 0

    @patch("agents.runner.subprocess.run")
    def test_pipeline_chains_output(self, mock_run, conn):
        """Output from step N is passed as previous_output to step N+1."""
        call_count = [0]

        def side_effect(*args, **kwargs):
            call_count[0] += 1
            if call_count[0] == 1:
                return _mock_claude_success({"result": "bug is in line 42"})
            return _mock_claude_success({"result": "test written"})

        mock_run.side_effect = side_effect

        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "tester", "brief": "write test"},
        ]
        run_pipeline(conn, "VDOL-001", steps)

        # Second call should include first step's output in prompt
        second_call = mock_run.call_args_list[1]
        prompt = second_call[0][0][2]  # -p argument
        assert "line 42" in prompt or "bug" in prompt

    def test_pipeline_task_not_found(self, conn):
        result = run_pipeline(conn, "NONEXISTENT", [{"role": "debugger"}])
        assert result["success"] is False
        assert "not found" in result["error"]


# ---------------------------------------------------------------------------
# JSON parsing
# ---------------------------------------------------------------------------

class TestTryParseJson:
    # _try_parse_json extracts JSON from raw model output: direct JSON,
    # fenced ```json blocks, or JSON embedded inside prose.
    def test_direct_json(self):
        assert _try_parse_json('{"a": 1}') == {"a": 1}

    def test_json_in_code_fence(self):
        text = 'Some text\n```json\n{"a": 1}\n```\nMore text'
        assert _try_parse_json(text) == {"a": 1}

    def test_json_embedded_in_text(self):
        text = 'Here is the result: {"status": "ok", "count": 42} and more'
        result = _try_parse_json(text)
        assert result == {"status": "ok", "count": 42}

    def test_empty_string(self):
        assert _try_parse_json("") is None

    def test_no_json(self):
        assert _try_parse_json("just plain text") is None

    def test_json_array(self):
        assert _try_parse_json('[1, 2, 3]') == [1, 2, 3]
diff --git a/web/api.py
b/web/api.py
new file mode 100644
index 0000000..6536a77
--- /dev/null
+++ b/web/api.py
@@ -0,0 +1,416 @@
"""
Kin Web API — FastAPI backend reading ~/.kin/kin.db via core.models.
Run: uvicorn web.api:app --reload --port 8420
"""

import subprocess
import sys
from pathlib import Path

# Ensure project root on sys.path
sys.path.insert(0, str(Path(__file__).parent.parent))

from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel

from core.db import init_db
from core import models
from agents.bootstrap import (
    detect_tech_stack, detect_modules, extract_decisions_from_claude_md,
    find_vault_root, scan_obsidian, save_to_db,
)

DB_PATH = Path.home() / ".kin" / "kin.db"

app = FastAPI(title="Kin API", version="0.1.0")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173", "http://127.0.0.1:5173"],
    allow_methods=["*"],
    allow_headers=["*"],
)


def get_conn():
    # Opens a fresh connection per request via init_db.
    # NOTE(review): every endpoint closes the connection manually; an
    # exception between get_conn() and conn.close() leaks the handle.
    # Consider try/finally or a FastAPI dependency with yield.
    return init_db(DB_PATH)


# ---------------------------------------------------------------------------
# Projects
# ---------------------------------------------------------------------------

@app.get("/api/projects")
def list_projects(status: str | None = None):
    # Project summaries (task counts etc.), optionally filtered by status.
    conn = get_conn()
    summary = models.get_project_summary(conn)
    if status:
        summary = [s for s in summary if s["status"] == status]
    conn.close()
    return summary


@app.get("/api/projects/{project_id}")
def get_project(project_id: str):
    # Single project with its tasks, modules and decisions inlined.
    conn = get_conn()
    p = models.get_project(conn, project_id)
    if not p:
        conn.close()
        raise HTTPException(404, f"Project '{project_id}' not found")
    tasks = models.list_tasks(conn, project_id=project_id)
    mods = models.get_modules(conn, project_id)
    decisions = models.get_decisions(conn, project_id)
    conn.close()
    return {**p, "tasks": tasks, "modules": mods, "decisions": decisions}


class ProjectCreate(BaseModel):
    id: str
    name: str
    path: str
    tech_stack: list[str] | None = None
    status: str = "active"
    priority: int = 5


@app.post("/api/projects")
def create_project(body: ProjectCreate):
    conn = get_conn()
    if models.get_project(conn, body.id):
        conn.close()
        raise HTTPException(409, f"Project '{body.id}' already exists")
    p = models.create_project(
        conn, body.id, body.name, body.path,
        tech_stack=body.tech_stack, status=body.status, priority=body.priority,
    )
    conn.close()
    return p


# ---------------------------------------------------------------------------
# Tasks
# ---------------------------------------------------------------------------

@app.get("/api/tasks/{task_id}")
def get_task(task_id: str):
    conn = get_conn()
    t = models.get_task(conn, task_id)
    conn.close()
    if not t:
        raise HTTPException(404, f"Task '{task_id}' not found")
    return t


class TaskCreate(BaseModel):
    project_id: str
    title: str
    priority: int = 5
    route_type: str | None = None


@app.post("/api/tasks")
def create_task(body: TaskCreate):
    conn = get_conn()
    p = models.get_project(conn, body.project_id)
    if not p:
        conn.close()
        raise HTTPException(404, f"Project '{body.project_id}' not found")
    # Auto-generate task ID: PREFIX-NNN from the highest purely-numeric
    # suffix among existing task IDs (non-numeric suffixes are skipped).
    # NOTE(review): this duplicates core.followup._next_task_id — consider
    # sharing one implementation to keep numbering rules in sync.
    existing = models.list_tasks(conn, project_id=body.project_id)
    prefix = body.project_id.upper()
    max_num = 0
    for t in existing:
        if t["id"].startswith(prefix + "-"):
            try:
                num = int(t["id"].split("-", 1)[1])
                max_num = max(max_num, num)
            except ValueError:
                pass
    task_id = f"{prefix}-{max_num + 1:03d}"
    brief = {"route_type": body.route_type} if body.route_type else None
    t = models.create_task(conn, task_id, body.project_id, body.title,
                           priority=body.priority, brief=brief)
    conn.close()
    return t


@app.get("/api/tasks/{task_id}/pipeline")
def get_task_pipeline(task_id: str):
    """Get agent_logs for a task (pipeline steps)."""
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    rows = conn.execute(
        """SELECT id, agent_role, action, output_summary, success,
                  duration_seconds, tokens_used, model, cost_usd, created_at
           FROM agent_logs WHERE task_id = ? ORDER BY created_at""",
        (task_id,),
    ).fetchall()
    steps = [dict(r) for r in rows]
    conn.close()
    return steps


@app.get("/api/tasks/{task_id}/full")
def get_task_full(task_id: str):
    """Task + pipeline steps + related decisions."""
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    rows = conn.execute(
        """SELECT id, agent_role, action, output_summary, success,
                  duration_seconds, tokens_used, model, cost_usd, created_at
           FROM agent_logs WHERE task_id = ? ORDER BY created_at""",
        (task_id,),
    ).fetchall()
    steps = [dict(r) for r in rows]
    decisions = models.get_decisions(conn, t["project_id"])
    # Filter to decisions linked to this task
    task_decisions = [d for d in decisions if d.get("task_id") == task_id]
    conn.close()
    return {**t, "pipeline_steps": steps, "related_decisions": task_decisions}


class TaskApprove(BaseModel):
    decision_title: str | None = None
    decision_description: str | None = None
    decision_type: str = "decision"
    create_followups: bool = False


@app.post("/api/tasks/{task_id}/approve")
def approve_task(task_id: str, body: TaskApprove | None = None):
    """Approve a task: set status=done, optionally add decision and create follow-ups."""
    from core.followup import generate_followups

    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    models.update_task(conn, task_id, status="done")
    decision = None
    if body and body.decision_title:
        # Description falls back to the title when not provided.
        decision = models.add_decision(
            conn, t["project_id"], body.decision_type,
            body.decision_title, body.decision_description or body.decision_title,
            task_id=task_id,
        )
    followup_tasks = []
    pending_actions = []
    if body and body.create_followups:
        result = generate_followups(conn, task_id)
        followup_tasks = result["created"]
        pending_actions = result["pending_actions"]
    conn.close()
    return {
        "status": "done",
        "decision": decision,
        "followup_tasks": followup_tasks,
        "needs_decision": len(pending_actions) > 0,
        "pending_actions": pending_actions,
    }


class ResolveAction(BaseModel):
    action: dict
    choice: str  # "rerun" | "manual_task" | "skip"


@app.post("/api/tasks/{task_id}/resolve")
def resolve_action(task_id: str, body: ResolveAction):
    """Resolve a pending permission action from follow-up generation."""
    from core.followup import resolve_pending_action

    if body.choice not in ("rerun", "manual_task", "skip"):
        raise HTTPException(400, f"Invalid choice: {body.choice}")
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    result = resolve_pending_action(conn, task_id, body.action, body.choice)
    conn.close()
    return {"choice": body.choice, "result": result}


class TaskReject(BaseModel):
    reason: str


@app.post("/api/tasks/{task_id}/reject")
def reject_task(task_id: str, body: TaskReject):
    """Reject a task: set status=pending with reason in review field."""
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    models.update_task(conn, task_id, status="pending", review={"rejected": body.reason})
    conn.close()
    return {"status": "pending", "reason": body.reason}


@app.get("/api/tasks/{task_id}/running")
def is_task_running(task_id: str):
    """Check if task has an active (running) pipeline."""
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    row = conn.execute(
        "SELECT id, status FROM pipelines WHERE task_id = ? ORDER BY created_at DESC LIMIT 1",
        (task_id,),
    ).fetchone()
    conn.close()
    if row and row["status"] == "running":
        return {"running": True, "pipeline_id": row["id"]}
    return {"running": False}


@app.post("/api/tasks/{task_id}/run")
def run_task(task_id: str):
    """Launch pipeline for a task in background. Returns 202."""
    conn = get_conn()
    t = models.get_task(conn, task_id)
    if not t:
        conn.close()
        raise HTTPException(404, f"Task '{task_id}' not found")
    # Set task to in_progress immediately so UI updates
    models.update_task(conn, task_id, status="in_progress")
    conn.close()
    # Launch kin run in background subprocess
    # NOTE(review): fire-and-forget — the child is never waited on (possible
    # zombie until reaped) and its stderr is inherited, not captured; also,
    # if Popen fails the task is left stuck in in_progress. Confirm intended.
    kin_root = Path(__file__).parent.parent
    try:
        proc = subprocess.Popen(
            [sys.executable, "-m", "cli.main", "--db", str(DB_PATH),
             "run", task_id],
            cwd=str(kin_root),
            stdout=subprocess.DEVNULL,
        )
        import logging
        logging.getLogger("kin").info(f"Pipeline started for {task_id}, pid={proc.pid}")
    except Exception as e:
        raise HTTPException(500, f"Failed to start pipeline: {e}")
    return JSONResponse({"status": "started", "task_id": task_id}, status_code=202)


# ---------------------------------------------------------------------------
# Decisions
# ---------------------------------------------------------------------------

@app.get("/api/decisions")
def list_decisions(
    project: str = Query(...),
    category: str | None = None,
    tag: list[str] | None = Query(None),
    type: list[str] | None = Query(None),
):
    conn = get_conn()
    decisions = models.get_decisions(
        conn, project, category=category, tags=tag, types=type,
    )
    conn.close()
    return decisions


class DecisionCreate(BaseModel):
    project_id: str
    type: str
    title: str
    description: str
    category: str | None = None
    tags: list[str] | None = None
    task_id: str | None = None


@app.post("/api/decisions")
def create_decision(body: DecisionCreate):
    conn = get_conn()
    p = models.get_project(conn, body.project_id)
    if not p:
        conn.close()
        raise HTTPException(404, f"Project '{body.project_id}' not found")
    d = models.add_decision(
        conn, body.project_id, body.type, body.title, body.description,
        category=body.category, tags=body.tags, task_id=body.task_id,
    )
    conn.close()
    return d


# ---------------------------------------------------------------------------
# Cost
# ---------------------------------------------------------------------------

@app.get("/api/cost")
def cost_summary(days: int = 7):
    conn = get_conn()
    costs = models.get_cost_summary(conn, days=days)
    conn.close()
    return costs


# ---------------------------------------------------------------------------
# Support
# ---------------------------------------------------------------------------

@app.get("/api/support/tickets")
def list_tickets(project: str | None = None, status: str | None = None):
    conn = get_conn()
    tickets = models.list_tickets(conn, project_id=project, status=status)
    conn.close()
    return tickets


# ---------------------------------------------------------------------------
# Bootstrap
# ---------------------------------------------------------------------------

class BootstrapRequest(BaseModel):
    path: str
    id: str
    name: str
    vault_path: str | None = None


@app.post("/api/bootstrap")
def bootstrap(body: BootstrapRequest):
    # Imports a project: detects tech stack/modules, mines CLAUDE.md for
    # decisions, and optionally scans an Obsidian vault for tasks/decisions.
    project_path = Path(body.path).expanduser().resolve()
    if not project_path.is_dir():
        raise HTTPException(400, f"Path '{body.path}' is not a directory")

    conn = get_conn()
    if models.get_project(conn, body.id):
        conn.close()
        raise HTTPException(409, f"Project '{body.id}' already exists")

    tech_stack = detect_tech_stack(project_path)
    modules = detect_modules(project_path)
    decisions = extract_decisions_from_claude_md(project_path, body.id, body.name)

    obsidian = None
    vault_root = find_vault_root(Path(body.vault_path) if body.vault_path else None)
    if vault_root:
        dir_name = project_path.name
        obs = scan_obsidian(vault_root, body.id, body.name, dir_name)
        if obs["tasks"] or obs["decisions"]:
            obsidian = obs

    save_to_db(conn, body.id, body.name, str(project_path),
               tech_stack, modules, decisions, obsidian)
    p = models.get_project(conn, body.id)
    conn.close()
    return {
        "project": p,
        "modules_count": len(modules),
        "decisions_count": len(decisions) + len((obsidian or {}).get("decisions", [])),
        "tasks_count": len((obsidian or {}).get("tasks", [])),
    }
diff --git a/web/frontend/.gitignore b/web/frontend/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/web/frontend/.gitignore
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
diff --git a/web/frontend/index.html b/web/frontend/index.html
new file mode 100644
index 0000000..b634d37
--- /dev/null
+++ b/web/frontend/index.html
@@ -0,0 +1,12 @@
Kin
+ + + diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json new file mode 100644 index 0000000..06f1b0a --- /dev/null +++ b/web/frontend/package-lock.json @@ -0,0 +1,2345 @@ +{ + "name": "frontend", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "frontend", + "version": "0.0.0", + "dependencies": { + "vue": "^3.5.30", + "vue-router": "^4.6.4" + }, + "devDependencies": { + "@types/node": "^24.12.0", + "@vitejs/plugin-vue": "^6.0.5", + "@vue/tsconfig": "^0.9.0", + "autoprefixer": "^10.4.27", + "postcss": "^8.5.8", + "tailwindcss": "^3.4.19", + "typescript": "~5.9.3", + "vite": "^8.0.0", + "vue-tsc": "^3.2.5" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": 
"sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emnapi/core": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.0.tgz", + "integrity": "sha512-0DQ98G9ZQZOxfUcQn1waV2yS8aWdZ6kJMbYCJB3oUBecjWYO1fqJ+a1DRfPF3O5JEkwqwP1A9QEN/9mYm2Yd0w==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.0.tgz", + "integrity": "sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.0.tgz", + "integrity": "sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": 
"sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.1.tgz", + "integrity": "sha512-p64ah1M1ld8xjWv3qbvFwHiFVWrq1yFvV4f7w+mzaqiR4IlSgkqhcRdHwsGgomwzBH51sRY4NEowLxnaBjcW/A==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": 
"https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@oxc-project/runtime": { + "version": "0.115.0", + "resolved": "https://registry.npmjs.org/@oxc-project/runtime/-/runtime-0.115.0.tgz", + "integrity": "sha512-Rg8Wlt5dCbXhQnsXPrkOjL1DTSvXLgb2R/KYfnf1/K+R0k6UMLEmbQXPM+kwrWqSmWA2t0B1EtHy2/3zikQpvQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@oxc-project/types": { + "version": "0.115.0", + "resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.115.0.tgz", + "integrity": "sha512-4n91DKnebUS4yjUHl2g3/b2T+IUdCfmoZGhmwsovZCDaJSs+QkVAM+0AqqTxHSsHfeiMuueT75cZaZcT/m0pSw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Boshen" + } + }, + "node_modules/@rolldown/binding-android-arm64": { + "version": "1.0.0-rc.9", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.9.tgz", + "integrity": "sha512-lcJL0bN5hpgJfSIz/8PIf02irmyL43P+j1pTCfbD1DbLkmGRuFIA4DD3B3ZOvGqG0XiVvRznbKtN0COQVaKUTg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-arm64": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-arm64/-/binding-darwin-arm64-1.0.0-rc.9.tgz", + "integrity": "sha512-J7Zk3kLYFsLtuH6U+F4pS2sYVzac0qkjcO5QxHS7OS7yZu2LRs+IXo+uvJ/mvpyUljDJ3LROZPoQfgBIpCMhdQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-x64": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.9.tgz", + "integrity": "sha512-iwtmmghy8nhfRGeNAIltcNXzD0QMNaaA5U/NyZc1Ia4bxrzFByNMDoppoC+hl7cDiUq5/1CnFthpT9n+UtfFyg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-freebsd-x64": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.9.tgz", + "integrity": "sha512-DLFYI78SCiZr5VvdEplsVC2Vx53lnA4/Ga5C65iyldMVaErr86aiqCoNBLl92PXPfDtUYjUh+xFFor40ueNs4Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm-gnueabihf": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.9.tgz", + 
"integrity": "sha512-CsjTmTwd0Hri6iTw/DRMK7kOZ7FwAkrO4h8YWKoX/kcj833e4coqo2wzIFywtch/8Eb5enQ/lwLM7w6JX1W5RQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-gnu": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.9.tgz", + "integrity": "sha512-2x9O2JbSPxpxMDhP9Z74mahAStibTlrBMW0520+epJH5sac7/LwZW5Bmg/E6CXuEF53JJFW509uP+lSedaUNxg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-musl": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.9.tgz", + "integrity": "sha512-JA1QRW31ogheAIRhIg9tjMfsYbglXXYGNPLdPEYrwFxdbkQCAzvpSCSHCDWNl4hTtrol8WeboCSEpjdZK8qrCg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-ppc64-gnu": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.9.tgz", + "integrity": "sha512-aOKU9dJheda8Kj8Y3w9gnt9QFOO+qKPAl8SWd7JPHP+Cu0EuDAE5wokQubLzIDQWg2myXq2XhTpOVS07qqvT+w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-s390x-gnu": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.9.tgz", + "integrity": 
"sha512-OalO94fqj7IWRn3VdXWty75jC5dk4C197AWEuMhIpvVv2lw9fiPhud0+bW2ctCxb3YoBZor71QHbY+9/WToadA==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-gnu": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.9.tgz", + "integrity": "sha512-cVEl1vZtBsBZna3YMjGXNvnYYrOJ7RzuWvZU0ffvJUexWkukMaDuGhUXn0rjnV0ptzGVkvc+vW9Yqy6h8YX4pg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-musl": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.9.tgz", + "integrity": "sha512-UzYnKCIIc4heAKgI4PZ3dfBGUZefGCJ1TPDuLHoCzgrMYPb5Rv6TLFuYtyM4rWyHM7hymNdsg5ik2C+UD9VDbA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-openharmony-arm64": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.9.tgz", + "integrity": "sha512-+6zoiF+RRyf5cdlFQP7nm58mq7+/2PFaY2DNQeD4B87N36JzfF/l9mdBkkmTvSYcYPE8tMh/o3cRlsx1ldLfog==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-wasm32-wasi": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.9.tgz", + "integrity": "sha512-rgFN6sA/dyebil3YTlL2evvi/M+ivhfnyxec7AccTpRPccno/rPoNlqybEZQBkcbZu8Hy+eqNJCqfBR8P7Pg8g==", + 
"cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@napi-rs/wasm-runtime": "^1.1.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/binding-win32-arm64-msvc": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.9.tgz", + "integrity": "sha512-lHVNUG/8nlF1IQk1C0Ci574qKYyty2goMiPlRqkC5R+3LkXDkL5Dhx8ytbxq35m+pkHVIvIxviD+TWLdfeuadA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-x64-msvc": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.9.tgz", + "integrity": "sha512-G0oA4+w1iY5AGi5HcDTxWsoxF509hrFIPB2rduV5aDqS9FtDg1CAfa7V34qImbjfhIcA8C+RekocJZA96EarwQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.2", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.2.tgz", + "integrity": "sha512-izyXV/v+cHiRfozX62W9htOAvwMo4/bXKDrQ+vom1L1qRuexPock/7VZDAhnpHCLNejd3NJ6hiab+tO0D44Rgw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", + "integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/node": { + "version": "24.12.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.12.0.tgz", + "integrity": 
"sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@vitejs/plugin-vue": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-6.0.5.tgz", + "integrity": "sha512-bL3AxKuQySfk1iGcBsQnoRVexTPJq0Z/ixFVM8OhVJAP6ZXXXLtM7NFKWhLl30Kg7uTBqIaPXbh+nuQCuBDedg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rolldown/pluginutils": "1.0.0-rc.2" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "vue": "^3.2.25" + } + }, + "node_modules/@volar/language-core": { + "version": "2.4.28", + "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.28.tgz", + "integrity": "sha512-w4qhIJ8ZSitgLAkVay6AbcnC7gP3glYM3fYwKV3srj8m494E3xtrCv6E+bWviiK/8hs6e6t1ij1s2Endql7vzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/source-map": "2.4.28" + } + }, + "node_modules/@volar/source-map": { + "version": "2.4.28", + "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-2.4.28.tgz", + "integrity": "sha512-yX2BDBqJkRXfKw8my8VarTyjv48QwxdJtvRgUpNE5erCsgEUdI2DsLbpa+rOQVAJYshY99szEcRDmyHbF10ggQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@volar/typescript": { + "version": "2.4.28", + "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-2.4.28.tgz", + "integrity": "sha512-Ja6yvWrbis2QtN4ClAKreeUZPVYMARDYZl9LMEv1iQ1QdepB6wn0jTRxA9MftYmYa4DQ4k/DaSZpFPUfxl8giw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "2.4.28", + "path-browserify": "^1.0.1", + "vscode-uri": "^3.0.8" + } + }, + "node_modules/@vue/compiler-core": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.30.tgz", + "integrity": 
"sha512-s3DfdZkcu/qExZ+td75015ljzHc6vE+30cFMGRPROYjqkroYI5NV2X1yAMX9UeyBNWB9MxCfPcsjpLS11nzkkw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@vue/shared": "3.5.30", + "entities": "^7.0.1", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.1" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.30.tgz", + "integrity": "sha512-eCFYESUEVYHhiMuK4SQTldO3RYxyMR/UQL4KdGD1Yrkfdx4m/HYuZ9jSfPdA+nWJY34VWndiYdW/wZXyiPEB9g==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.5.30", + "@vue/shared": "3.5.30" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.30.tgz", + "integrity": "sha512-LqmFPDn89dtU9vI3wHJnwaV6GfTRD87AjWpTWpyrdVOObVtjIuSeZr181z5C4PmVx/V3j2p+0f7edFKGRMpQ5A==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@vue/compiler-core": "3.5.30", + "@vue/compiler-dom": "3.5.30", + "@vue/compiler-ssr": "3.5.30", + "@vue/shared": "3.5.30", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.21", + "postcss": "^8.5.8", + "source-map-js": "^1.2.1" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.30.tgz", + "integrity": "sha512-NsYK6OMTnx109PSL2IAyf62JP6EUdk4Dmj6AkWcJGBvN0dQoMYtVekAmdqgTtWQgEJo+Okstbf/1p7qZr5H+bA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.30", + "@vue/shared": "3.5.30" + } + }, + "node_modules/@vue/devtools-api": { + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", + "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", + "license": "MIT" + }, + "node_modules/@vue/language-core": { + "version": "3.2.5", + "resolved": 
"https://registry.npmjs.org/@vue/language-core/-/language-core-3.2.5.tgz", + "integrity": "sha512-d3OIxN/+KRedeM5wQ6H6NIpwS3P5gC9nmyaHgBk+rO6dIsjY+tOh4UlPpiZbAh3YtLdCGEX4M16RmsBqPmJV+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "2.4.28", + "@vue/compiler-dom": "^3.5.0", + "@vue/shared": "^3.5.0", + "alien-signals": "^3.0.0", + "muggle-string": "^0.4.1", + "path-browserify": "^1.0.1", + "picomatch": "^4.0.2" + } + }, + "node_modules/@vue/reactivity": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.30.tgz", + "integrity": "sha512-179YNgKATuwj9gB+66snskRDOitDiuOZqkYia7mHKJaidOMo/WJxHKF8DuGc4V4XbYTJANlfEKb0yxTQotnx4Q==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.5.30" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.30.tgz", + "integrity": "sha512-e0Z+8PQsUTdwV8TtEsLzUM7SzC7lQwYKePydb7K2ZnmS6jjND+WJXkmmfh/swYzRyfP1EY3fpdesyYoymCzYfg==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.30", + "@vue/shared": "3.5.30" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.30.tgz", + "integrity": "sha512-2UIGakjU4WSQ0T4iwDEW0W7vQj6n7AFn7taqZ9Cvm0Q/RA2FFOziLESrDL4GmtI1wV3jXg5nMoJSYO66egDUBw==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.30", + "@vue/runtime-core": "3.5.30", + "@vue/shared": "3.5.30", + "csstype": "^3.2.3" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.30.tgz", + "integrity": "sha512-v+R34icapydRwbZRD0sXwtHqrQJv38JuMB4JxbOxd8NEpGLny7cncMp53W9UH/zo4j8eDHjQ1dEJXwzFQknjtQ==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.5.30", + "@vue/shared": "3.5.30" + }, + "peerDependencies": { + "vue": 
"3.5.30" + } + }, + "node_modules/@vue/shared": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.30.tgz", + "integrity": "sha512-YXgQ7JjaO18NeK2K9VTbDHaFy62WrObMa6XERNfNOkAhD1F1oDSf3ZJ7K6GqabZ0BvSDHajp8qfS5Sa2I9n8uQ==", + "license": "MIT" + }, + "node_modules/@vue/tsconfig": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@vue/tsconfig/-/tsconfig-0.9.0.tgz", + "integrity": "sha512-RP+v9Cpbsk1ZVXltCHHkYBr7+624x6gcijJXVjIcsYk7JXqvIpRtMwU2ARLvWDhmy9ffdFYxhsfJnPztADBohQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "typescript": "5.x", + "vue": "^3.4.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/alien-signals": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-3.1.2.tgz", + "integrity": "sha512-d9dYqZTS90WLiU0I5c6DHj/HcKkF8ZyGN3G5x8wSbslulz70KOxaqCT0hQCo9KOyhVqzqGojvNdJXoTumZOtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.27", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.27.tgz", + "integrity": "sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001774", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.8", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.8.tgz", + "integrity": "sha512-PCLz/LXGBsNTErbtB6i5u4eLpHeMfi93aUv5duMmj6caNu6IphS4q6UevDnL36sZQv9lrP11dbPKGMaXPwMKfQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": 
"sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001779", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001779.tgz", + "integrity": 
"sha512-U5og2PN7V4DMgF50YPNtnZJGWVLFjjsN3zb6uMT5VGYIewieDj1upwfuVNXf4Kor+89c3iCRJnSzMD5LmTvsfA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": 
"sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.313", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.313.tgz", + "integrity": "sha512-QBMrTWEf00GXZmJyx2lbYD45jpI3TUFnNIzJ5BBc8piGUDwMPa1GV6HJWTZVvY/eiN3fSopl7NRbgGp9sZ9LTA==", + "dev": true, + "license": "ISC" + }, + "node_modules/entities": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.1.tgz", + "integrity": "sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { 
+ "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": 
"sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + 
"integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/lightningcss": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz", + "integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.32.0", + "lightningcss-darwin-arm64": "1.32.0", + "lightningcss-darwin-x64": "1.32.0", + "lightningcss-freebsd-x64": "1.32.0", + "lightningcss-linux-arm-gnueabihf": "1.32.0", + "lightningcss-linux-arm64-gnu": "1.32.0", + "lightningcss-linux-arm64-musl": "1.32.0", + "lightningcss-linux-x64-gnu": "1.32.0", + "lightningcss-linux-x64-musl": "1.32.0", + "lightningcss-win32-arm64-msvc": "1.32.0", + "lightningcss-win32-x64-msvc": "1.32.0" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.32.0", + "resolved": 
"https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz", + "integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz", + "integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz", + "integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz", + "integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz", + "integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz", + "integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz", + "integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz", + "integrity": 
"sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz", + "integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz", + "integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz", + "integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + 
"node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": 
"sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/muggle-string": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", + "integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.36", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.36.tgz", + "integrity": "sha512-TdC8FSgHz8Mwtw9g5L4gR/Sh9XhSP/0DEkQxfEFXOpiul5IiHgHan2VhYYb6agDSfp4KuvltmGApc8HMgUrIkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + 
} + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + 
"funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": 
"https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/readdirp/node_modules/picomatch": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rolldown": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/rolldown/-/rolldown-1.0.0-rc.9.tgz", + "integrity": "sha512-9EbgWge7ZH+yqb4d2EnELAntgPTWbfL8ajiTW+SyhJEC4qhBbkCKbqFV4Ge4zmu5ziQuVbWxb/XwLZ+RIO7E8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@oxc-project/types": "=0.115.0", + "@rolldown/pluginutils": "1.0.0-rc.9" + }, + "bin": { + "rolldown": "bin/cli.mjs" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "optionalDependencies": { + "@rolldown/binding-android-arm64": "1.0.0-rc.9", + "@rolldown/binding-darwin-arm64": "1.0.0-rc.9", + "@rolldown/binding-darwin-x64": "1.0.0-rc.9", + "@rolldown/binding-freebsd-x64": "1.0.0-rc.9", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.9", + 
"@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.9", + "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.9", + "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.9", + "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.9", + "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.9", + "@rolldown/binding-linux-x64-musl": "1.0.0-rc.9", + "@rolldown/binding-openharmony-arm64": "1.0.0-rc.9", + "@rolldown/binding-wasm32-wasi": "1.0.0-rc.9", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.9", + "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.9" + } + }, + "node_modules/rolldown/node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.9", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.9.tgz", + "integrity": "sha512-w6oiRWgEBl04QkFZgmW+jnU1EC9b57Oihi2ot3HNWIQRqgHp5PnYDia5iZ5FF7rpa4EQdiqMDXjlqKGXBhsoXw==", + "dev": true, + "license": "MIT" + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": 
"sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": 
{ + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": 
"sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD", + "optional": true + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/vite/-/vite-8.0.0.tgz", + "integrity": "sha512-fPGaRNj9Zytaf8LEiBhY7Z6ijnFKdzU/+mL8EFBaKr7Vw1/FWcTBAMW0wLPJAGMPX38ZPVCVgLceWiEqeoqL2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@oxc-project/runtime": "0.115.0", + "lightningcss": "^1.32.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.8", + "rolldown": "1.0.0-rc.9", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "@vitejs/devtools": "^0.0.0-alpha.31", + "esbuild": "^0.27.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "@vitejs/devtools": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vscode-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", + "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/vue": { + "version": "3.5.30", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.30.tgz", + "integrity": "sha512-hTHLc6VNZyzzEH/l7PFGjpcTvUgiaPK5mdLkbjrTeWSRcEfxFrv56g/XckIYlE9ckuobsdwqd5mk2g1sBkMewg==", + "license": "MIT", + 
"dependencies": { + "@vue/compiler-dom": "3.5.30", + "@vue/compiler-sfc": "3.5.30", + "@vue/runtime-dom": "3.5.30", + "@vue/server-renderer": "3.5.30", + "@vue/shared": "3.5.30" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/vue-router": { + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", + "integrity": "sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.6.4" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "vue": "^3.5.0" + } + }, + "node_modules/vue-tsc": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-3.2.5.tgz", + "integrity": "sha512-/htfTCMluQ+P2FISGAooul8kO4JMheOTCbCy4M6dYnYYjqLe3BExZudAua6MSIKSFYQtFOYAll7XobYwcpokGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/typescript": "2.4.28", + "@vue/language-core": "3.2.5" + }, + "bin": { + "vue-tsc": "bin/vue-tsc.js" + }, + "peerDependencies": { + "typescript": ">=5.0.0" + } + } + } +} diff --git a/web/frontend/package.json b/web/frontend/package.json new file mode 100644 index 0000000..203c214 --- /dev/null +++ b/web/frontend/package.json @@ -0,0 +1,26 @@ +{ + "name": "frontend", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vue-tsc -b && vite build", + "preview": "vite preview" + }, + "dependencies": { + "vue": "^3.5.30", + "vue-router": "^4.6.4" + }, + "devDependencies": { + "@types/node": "^24.12.0", + "@vitejs/plugin-vue": "^6.0.5", + "@vue/tsconfig": "^0.9.0", + "autoprefixer": "^10.4.27", + "postcss": "^8.5.8", + "tailwindcss": "^3.4.19", + "typescript": "~5.9.3", + "vite": "^8.0.0", + "vue-tsc": "^3.2.5" + } +} diff --git a/web/frontend/postcss.config.js 
b/web/frontend/postcss.config.js new file mode 100644 index 0000000..2e7af2b --- /dev/null +++ b/web/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/web/frontend/src/App.vue b/web/frontend/src/App.vue new file mode 100644 index 0000000..4ebce21 --- /dev/null +++ b/web/frontend/src/App.vue @@ -0,0 +1,16 @@ + + + diff --git a/web/frontend/src/api.ts b/web/frontend/src/api.ts new file mode 100644 index 0000000..89afee3 --- /dev/null +++ b/web/frontend/src/api.ts @@ -0,0 +1,132 @@ +const BASE = 'http://localhost:8420/api' + +async function get(path: string): Promise { + const res = await fetch(`${BASE}${path}`) + if (!res.ok) throw new Error(`${res.status} ${res.statusText}`) + return res.json() +} + +async function post(path: string, body: unknown): Promise { + const res = await fetch(`${BASE}${path}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }) + if (!res.ok) throw new Error(`${res.status} ${res.statusText}`) + return res.json() +} + +export interface Project { + id: string + name: string + path: string + status: string + priority: number + tech_stack: string[] | null + created_at: string + total_tasks: number + done_tasks: number + active_tasks: number + blocked_tasks: number + review_tasks: number +} + +export interface ProjectDetail extends Project { + tasks: Task[] + modules: Module[] + decisions: Decision[] +} + +export interface Task { + id: string + project_id: string + title: string + status: string + priority: number + assigned_role: string | null + parent_task_id: string | null + brief: Record | null + spec: Record | null + created_at: string + updated_at: string +} + +export interface Decision { + id: number + project_id: string + task_id: string | null + type: string + category: string | null + title: string + description: string + tags: string[] | null + created_at: string +} + +export interface Module { + 
id: number + project_id: string + name: string + type: string + path: string + description: string | null + owner_role: string | null + dependencies: string[] | null +} + +export interface PipelineStep { + id: number + agent_role: string + action: string + output_summary: string | null + success: boolean | number + duration_seconds: number | null + tokens_used: number | null + model: string | null + cost_usd: number | null + created_at: string +} + +export interface TaskFull extends Task { + pipeline_steps: PipelineStep[] + related_decisions: Decision[] +} + +export interface PendingAction { + type: string + description: string + original_item: Record + options: string[] +} + +export interface CostEntry { + project_id: string + project_name: string + runs: number + total_tokens: number + total_cost_usd: number + total_duration_seconds: number +} + +export const api = { + projects: () => get('/projects'), + project: (id: string) => get(`/projects/${id}`), + task: (id: string) => get(`/tasks/${id}`), + taskFull: (id: string) => get(`/tasks/${id}/full`), + taskPipeline: (id: string) => get(`/tasks/${id}/pipeline`), + cost: (days = 7) => get(`/cost?days=${days}`), + createProject: (data: { id: string; name: string; path: string; tech_stack?: string[]; priority?: number }) => + post('/projects', data), + createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string }) => + post('/tasks', data), + approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string; create_followups?: boolean }) => + post<{ status: string; followup_tasks: Task[]; needs_decision: boolean; pending_actions: PendingAction[] }>(`/tasks/${id}/approve`, data || {}), + resolveAction: (id: string, action: PendingAction, choice: string) => + post<{ choice: string; result: unknown }>(`/tasks/${id}/resolve`, { action, choice }), + rejectTask: (id: string, reason: string) => + post<{ status: string }>(`/tasks/${id}/reject`, 
{ reason }), + runTask: (id: string) => + post<{ status: string }>(`/tasks/${id}/run`, {}), + bootstrap: (data: { path: string; id: string; name: string }) => + post<{ project: Project }>('/bootstrap', data), +} diff --git a/web/frontend/src/components/Badge.vue b/web/frontend/src/components/Badge.vue new file mode 100644 index 0000000..f321109 --- /dev/null +++ b/web/frontend/src/components/Badge.vue @@ -0,0 +1,19 @@ + + + diff --git a/web/frontend/src/components/Modal.vue b/web/frontend/src/components/Modal.vue new file mode 100644 index 0000000..9724468 --- /dev/null +++ b/web/frontend/src/components/Modal.vue @@ -0,0 +1,18 @@ + + + diff --git a/web/frontend/src/main.ts b/web/frontend/src/main.ts new file mode 100644 index 0000000..91cc08d --- /dev/null +++ b/web/frontend/src/main.ts @@ -0,0 +1,18 @@ +import { createApp } from 'vue' +import { createRouter, createWebHistory } from 'vue-router' +import './style.css' +import App from './App.vue' +import Dashboard from './views/Dashboard.vue' +import ProjectView from './views/ProjectView.vue' +import TaskDetail from './views/TaskDetail.vue' + +const router = createRouter({ + history: createWebHistory(), + routes: [ + { path: '/', component: Dashboard }, + { path: '/project/:id', component: ProjectView, props: true }, + { path: '/task/:id', component: TaskDetail, props: true }, + ], +}) + +createApp(App).use(router).mount('#app') diff --git a/web/frontend/src/style.css b/web/frontend/src/style.css new file mode 100644 index 0000000..580a718 --- /dev/null +++ b/web/frontend/src/style.css @@ -0,0 +1,8 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +body { + @apply bg-gray-950 text-gray-100; + font-family: ui-monospace, 'SF Mono', Consolas, monospace; +} diff --git a/web/frontend/src/views/Dashboard.vue b/web/frontend/src/views/Dashboard.vue new file mode 100644 index 0000000..e8fc54a --- /dev/null +++ b/web/frontend/src/views/Dashboard.vue @@ -0,0 +1,187 @@ + + + diff --git 
a/web/frontend/src/views/ProjectView.vue b/web/frontend/src/views/ProjectView.vue new file mode 100644 index 0000000..06e608f --- /dev/null +++ b/web/frontend/src/views/ProjectView.vue @@ -0,0 +1,332 @@ + + + diff --git a/web/frontend/src/views/TaskDetail.vue b/web/frontend/src/views/TaskDetail.vue new file mode 100644 index 0000000..70b6503 --- /dev/null +++ b/web/frontend/src/views/TaskDetail.vue @@ -0,0 +1,351 @@ + + + diff --git a/web/frontend/tailwind.config.js b/web/frontend/tailwind.config.js new file mode 100644 index 0000000..6e2abc5 --- /dev/null +++ b/web/frontend/tailwind.config.js @@ -0,0 +1,10 @@ +/** @type {import('tailwindcss').Config} */ +export default { + darkMode: 'class', + content: ["./index.html", "./src/**/*.{vue,ts}"], + theme: { + extend: {}, + }, + plugins: [], +} + diff --git a/web/frontend/tsconfig.app.json b/web/frontend/tsconfig.app.json new file mode 100644 index 0000000..8d16e42 --- /dev/null +++ b/web/frontend/tsconfig.app.json @@ -0,0 +1,16 @@ +{ + "extends": "@vue/tsconfig/tsconfig.dom.json", + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", + "types": ["vite/client"], + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"] +} diff --git a/web/frontend/tsconfig.json b/web/frontend/tsconfig.json new file mode 100644 index 0000000..1ffef60 --- /dev/null +++ b/web/frontend/tsconfig.json @@ -0,0 +1,7 @@ +{ + "files": [], + "references": [ + { "path": "./tsconfig.app.json" }, + { "path": "./tsconfig.node.json" } + ] +} diff --git a/web/frontend/tsconfig.node.json b/web/frontend/tsconfig.node.json new file mode 100644 index 0000000..8a67f62 --- /dev/null +++ b/web/frontend/tsconfig.node.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": 
"./node_modules/.tmp/tsconfig.node.tsbuildinfo", + "target": "ES2023", + "lib": ["ES2023"], + "module": "ESNext", + "types": ["node"], + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/web/frontend/vite.config.ts b/web/frontend/vite.config.ts new file mode 100644 index 0000000..bbcf80c --- /dev/null +++ b/web/frontend/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite' +import vue from '@vitejs/plugin-vue' + +// https://vite.dev/config/ +export default defineConfig({ + plugins: [vue()], +})