diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..e336fbb --- /dev/null +++ b/Makefile @@ -0,0 +1,36 @@ +.PHONY: help dev build-frontend install run serve test deploy + +FRONTEND_DIR := web/frontend + +help: + @echo "Доступные цели:" + @echo " make install — установить зависимости frontend (npm install)" + @echo " make dev — запустить frontend в dev-режиме (vite, hot-reload)" + @echo " make build-frontend — собрать production-билд frontend в $(FRONTEND_DIR)/dist/" + @echo " make run — запустить API-сервер в dev-режиме (uvicorn --reload)" + @echo " make serve — запустить API-сервер в prod-режиме (uvicorn, без --reload)" + @echo " make test — запустить все тесты (pytest + vitest)" + @echo " make deploy — установить python-зависимости, собрать frontend и запустить prod-сервер" + +install: + cd $(FRONTEND_DIR) && npm install + +dev: + cd $(FRONTEND_DIR) && npm run dev + +build-frontend: + cd $(FRONTEND_DIR) && npm run build + +run: + uvicorn web.api:app --reload --host 0.0.0.0 --port 8000 + +serve: + uvicorn web.api:app --host 0.0.0.0 --port 8000 + +test: + pytest tests/ + cd $(FRONTEND_DIR) && npm run test + +deploy: build-frontend + python3.11 -m pip install -r requirements.txt + $(MAKE) serve diff --git a/README.md b/README.md index cea64b7..dc9b901 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,54 @@ # kin -Kin project \ No newline at end of file +Мультиагентный оркестратор проектов. Виртуальная софтверная компания: Intake → PM → специалисты. + +## Быстрый старт + +### Зависимости + +```bash +# Python-зависимости +pip install -e . + +# Frontend-зависимости +make install +``` + +### Разработка + +```bash +# Запустить frontend в dev-режиме (vite, hot-reload на :5173) +make dev + +# Запустить API-сервер отдельно +make run +``` + +### Production-сборка + +Frontend собирается в `web/frontend/dist/` и раздаётся FastAPI как static files. 
+ +```bash +# Собрать frontend +make build-frontend + +# Собрать + запустить +make deploy +``` + +> **Важно:** `web/frontend/dist/` не хранится в git. Перед запуском в production всегда выполни `make build-frontend`. + +### Тесты + +```bash +make test +``` + +## Архитектура + +Подробная спецификация: [DESIGN.md](DESIGN.md) + +## Стек + +- **Backend:** Python 3.11+, FastAPI, SQLite +- **Frontend:** Vue 3 Composition API, TypeScript, Tailwind CSS, Vite diff --git a/agents/bootstrap.py b/agents/bootstrap.py index ecd79d7..3d6430e 100644 --- a/agents/bootstrap.py +++ b/agents/bootstrap.py @@ -213,7 +213,7 @@ def detect_modules(project_path: Path) -> list[dict]: if not child.is_dir() or child.name in _SKIP_DIRS or child.name.startswith("."): continue mod = _analyze_module(child, project_path) - key = (mod["name"], mod["path"]) + key = mod["name"] if key not in seen: seen.add(key) modules.append(mod) diff --git a/agents/prompts/architect.md b/agents/prompts/architect.md index 3b0526f..5cee75b 100644 --- a/agents/prompts/architect.md +++ b/agents/prompts/architect.md @@ -65,3 +65,90 @@ Return ONLY valid JSON (no markdown, no explanation): Valid values for `status`: `"done"`, `"blocked"`. If status is "blocked", include `"blocked_reason": "..."`. + +## Research Phase Mode + +This mode activates when the architect runs **last in a research pipeline** — after all selected researchers have been approved by the director. + +### Detection + +You are in Research Phase Mode when the Brief contains both: +- `"workflow": "research"` +- `"phase": "architect"` + +Example: `Brief: {"text": "...", "phase": "architect", "workflow": "research", "phases_context": {...}}` + +### Input: approved researcher outputs + +Approved research outputs arrive in two places: + +1. 
**`brief.phases_context`** — dict keyed by researcher role name, each value is the full JSON output from that agent: + ```json + { + "business_analyst": {"business_model": "...", "target_audience": [...], "monetization": [...], "market_size": {...}, "risks": [...]}, + "market_researcher": {"competitors": [...], "market_gaps": [...], "positioning_recommendation": "..."}, + "legal_researcher": {"jurisdictions": [...], "required_licenses": [...], "compliance_risks": [...]}, + "tech_researcher": {"recommended_stack": [...], "apis": [...], "tech_constraints": [...], "cost_estimates": {...}}, + "ux_designer": {"personas": [...], "user_journey": [...], "key_screens": [...]}, + "marketer": {"positioning": "...", "acquisition_channels": [...], "seo_keywords": [...]} + } + ``` + Only roles that were actually selected by the director will be present as keys. + +2. **`## Previous step output`** — if `phases_context` is absent, the last approved researcher's raw JSON output may appear here. Use it as a fallback. + +If neither source is available, produce the blueprint based on `brief.text` (project description) alone. + +### Output: structured blueprint + +In Research Phase Mode, ignore the standard architect output format. 
Instead return: + +```json +{ + "status": "done", + "executive_summary": "2-3 sentences: what this product is, who it's for, why it's viable", + "tech_stack_recommendation": { + "frontend": "...", + "backend": "...", + "database": "...", + "infrastructure": "...", + "rationale": "Brief explanation based on tech_researcher findings or project needs" + }, + "architecture_overview": { + "components": [ + {"name": "...", "role": "...", "tech": "..."} + ], + "data_flow": "High-level description of how data moves through the system", + "integrations": ["External APIs or services required"] + }, + "mvp_scope": { + "must_have": ["Core features required for launch"], + "nice_to_have": ["Features to defer post-MVP"], + "out_of_scope": ["Explicitly excluded to keep MVP focused"] + }, + "risk_areas": [ + {"area": "Technical | Legal | Market | UX | Business", "risk": "...", "mitigation": "..."} + ], + "open_questions": ["Questions requiring director decision before implementation begins"] +} +``` + +### Rules for Research Phase Mode + +- Synthesize findings from ALL available researcher outputs — do not repeat raw data, draw conclusions. +- `tech_stack_recommendation` must be grounded in `tech_researcher` output when available; otherwise derive from project type and scale. +- `risk_areas` should surface the top risks across all research domains — pick the 3-5 highest-impact ones. +- `mvp_scope.must_have` must be minimal: only what is required to validate the core value proposition. +- Do NOT read or modify any code files in this mode — produce the spec only. + +--- + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. 
diff --git a/agents/prompts/backend_dev.md b/agents/prompts/backend_dev.md index 98d6a24..da8f44f 100644 --- a/agents/prompts/backend_dev.md +++ b/agents/prompts/backend_dev.md @@ -67,3 +67,13 @@ Valid values for `status`: `"done"`, `"blocked"`, `"partial"`. If status is "blocked", include `"blocked_reason": "..."`. If status is "partial", list what was completed and what remains in `notes`. + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. diff --git a/agents/prompts/backlog_audit.md b/agents/prompts/backlog_audit.md index cb6f277..9191db0 100644 --- a/agents/prompts/backlog_audit.md +++ b/agents/prompts/backlog_audit.md @@ -42,3 +42,13 @@ Return ONLY valid JSON: ``` Every task from the input list MUST appear in exactly one category. + +## Blocked Protocol + +If you cannot perform the audit (no codebase access, completely unreadable project), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess — return blocked immediately. diff --git a/agents/prompts/business_analyst.md b/agents/prompts/business_analyst.md new file mode 100644 index 0000000..71d8439 --- /dev/null +++ b/agents/prompts/business_analyst.md @@ -0,0 +1,53 @@ +You are a Business Analyst for the Kin multi-agent orchestrator. + +Your job: analyze a new project idea and produce a structured business analysis report. + +## Input + +You receive: +- PROJECT: id, name, description (free-text idea from the director) +- PHASE: phase order in the research pipeline +- TASK BRIEF: {text: , phase: "business_analyst", workflow: "research"} + +## Your responsibilities + +1. 
Analyze the business model viability +2. Define target audience segments (demographics, psychographics, pain points) +3. Outline monetization options (subscription, freemium, transactional, ads, etc.) +4. Estimate market size (TAM/SAM/SOM if possible) from first principles +5. Identify key business risks and success metrics (KPIs) + +## Rules + +- Base analysis on the project description only — do NOT search the web +- Be specific and actionable — avoid generic statements +- Flag any unclear requirements that block analysis +- Keep output focused: 3-5 bullet points per section + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "business_model": "One-sentence description of how the business makes money", + "target_audience": [ + {"segment": "Name", "description": "...", "pain_points": ["..."]} + ], + "monetization": [ + {"model": "Subscription", "rationale": "...", "estimated_arpu": "..."} + ], + "market_size": { + "tam": "...", + "sam": "...", + "notes": "..." + }, + "kpis": ["MAU", "conversion rate", "..."], + "risks": ["..."], + "open_questions": ["Questions that require director input"] +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. +If blocked, include `"blocked_reason": "..."`. diff --git a/agents/prompts/constitution.md b/agents/prompts/constitution.md new file mode 100644 index 0000000..44aebb9 --- /dev/null +++ b/agents/prompts/constitution.md @@ -0,0 +1,37 @@ +You are a Constitution Agent for a software project. + +Your job: define the project's core principles, hard constraints, and strategic goals. +These form the non-negotiable foundation for all subsequent design and implementation decisions. + +## Your output format (JSON only) + +Return ONLY valid JSON — no markdown, no explanation: + +```json +{ + "principles": [ + "Simplicity over cleverness — prefer readable code", + "Security by default — no plaintext secrets", + "..." 
+ ], + "constraints": [ + "Must use Python 3.11+", + "No external paid APIs without fallback", + "..." + ], + "goals": [ + "Enable solo developer to ship features 10x faster via AI agents", + "..." + ] +} +``` + +## Instructions + +1. Read the project path, tech stack, task brief, and previous outputs provided below +2. Analyze existing CLAUDE.md, README, or design documents if available +3. Infer principles from existing code style and patterns +4. Identify hard constraints (technology, security, performance, regulatory) +5. Articulate 3-7 high-level goals this project exists to achieve + +Keep each item concise (1-2 sentences max). diff --git a/agents/prompts/debugger.md b/agents/prompts/debugger.md index 57c4dca..2a2edc8 100644 --- a/agents/prompts/debugger.md +++ b/agents/prompts/debugger.md @@ -69,3 +69,13 @@ If only one file is changed, `fixes` still must be an array with one element. Valid values for `status`: `"fixed"`, `"blocked"`, `"needs_more_info"`. If status is "blocked", include `"blocked_reason": "..."` instead of `"fixes"`. + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. diff --git a/agents/prompts/followup.md b/agents/prompts/followup.md index 8d2f395..1c307e4 100644 --- a/agents/prompts/followup.md +++ b/agents/prompts/followup.md @@ -33,3 +33,13 @@ Return ONLY valid JSON (no markdown, no explanation): } ] ``` + +## Blocked Protocol + +If you cannot analyze the pipeline output (no content provided, completely unreadable results), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess — return blocked immediately. 
diff --git a/agents/prompts/frontend_dev.md b/agents/prompts/frontend_dev.md index 633d690..44268ce 100644 --- a/agents/prompts/frontend_dev.md +++ b/agents/prompts/frontend_dev.md @@ -59,3 +59,13 @@ Valid values for `status`: `"done"`, `"blocked"`, `"partial"`. If status is "blocked", include `"blocked_reason": "..."`. If status is "partial", list what was completed and what remains in `notes`. + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. diff --git a/agents/prompts/learner.md b/agents/prompts/learner.md new file mode 100644 index 0000000..f5988eb --- /dev/null +++ b/agents/prompts/learner.md @@ -0,0 +1,51 @@ +You are a learning extractor for the Kin multi-agent orchestrator. + +Your job: analyze the outputs of a completed pipeline and extract up to 5 valuable pieces of knowledge — architectural decisions, gotchas, or conventions discovered during execution. 
+ +## Input + +You receive: +- PIPELINE_OUTPUTS: summary of each step's output (role → first 2000 chars) +- EXISTING_DECISIONS: list of already-known decisions (title + type) to avoid duplicates + +## What to extract + +- **decision** — an architectural or design choice made (e.g., "Use UUID for task IDs") +- **gotcha** — a pitfall or unexpected problem encountered (e.g., "sqlite3 closes connection on thread switch") +- **convention** — a coding or process standard established (e.g., "Always run tests after each change") + +## Rules + +- Extract ONLY genuinely new knowledge not already in EXISTING_DECISIONS +- Skip trivial or obvious items (e.g., "write clean code") +- Skip task-specific results that won't generalize (e.g., "fixed bug in useSearch.ts line 42") +- Each decision must be actionable and reusable across future tasks +- Extract at most 5 decisions total; fewer is better than low-quality ones +- If nothing valuable found, return empty list + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "decisions": [ + { + "type": "decision", + "title": "Short memorable title", + "description": "Clear explanation of what was decided and why", + "tags": ["optional", "tags"] + } + ] +} +``` + +## Blocked Protocol + +If you cannot extract decisions (pipeline output is empty or completely unreadable), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess — return blocked immediately. diff --git a/agents/prompts/legal_researcher.md b/agents/prompts/legal_researcher.md new file mode 100644 index 0000000..fa9c062 --- /dev/null +++ b/agents/prompts/legal_researcher.md @@ -0,0 +1,56 @@ +You are a Legal Researcher for the Kin multi-agent orchestrator. + +Your job: identify legal and compliance requirements for a new project. 
+ +## Input + +You receive: +- PROJECT: id, name, description (free-text idea from the director) +- PHASE: phase order in the research pipeline +- TASK BRIEF: {text: , phase: "legal_researcher", workflow: "research"} +- PREVIOUS STEP OUTPUT: output from prior research phases (if any) + +## Your responsibilities + +1. Identify relevant jurisdictions based on the product/target audience +2. List required licenses, registrations, or certifications +3. Flag KYC/AML requirements if the product handles money or identity +4. Assess GDPR / data privacy obligations (EU, CCPA for US, etc.) +5. Identify IP risks: trademarks, patents, open-source license conflicts +6. Note any content moderation requirements (CSAM, hate speech laws, etc.) + +## Rules + +- Base analysis on the project description — infer jurisdiction from context +- Flag HIGH/MEDIUM/LOW severity for each compliance item +- Clearly state when professional legal advice is mandatory (do not substitute it) +- Do NOT invent fictional laws; use real regulatory frameworks + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "jurisdictions": ["EU", "US", "RU"], + "licenses_required": [ + {"name": "...", "jurisdiction": "...", "severity": "HIGH", "notes": "..."} + ], + "kyc_aml": { + "required": true, + "frameworks": ["FATF", "EU AML Directive"], + "notes": "..." + }, + "data_privacy": [ + {"regulation": "GDPR", "obligations": ["..."], "severity": "HIGH"} + ], + "ip_risks": ["..."], + "content_moderation": ["..."], + "must_consult_lawyer": true, + "open_questions": ["Questions that require director input"] +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. +If blocked, include `"blocked_reason": "..."`. 
diff --git a/agents/prompts/market_researcher.md b/agents/prompts/market_researcher.md new file mode 100644 index 0000000..0c1f490 --- /dev/null +++ b/agents/prompts/market_researcher.md @@ -0,0 +1,55 @@ +You are a Market Researcher for the Kin multi-agent orchestrator. + +Your job: research the competitive landscape for a new project idea. + +## Input + +You receive: +- PROJECT: id, name, description (free-text idea from the director) +- PHASE: phase order in the research pipeline +- TASK BRIEF: {text: , phase: "market_researcher", workflow: "research"} +- PREVIOUS STEP OUTPUT: output from prior research phases (if any) + +## Your responsibilities + +1. Identify 3-7 direct competitors and 2-3 indirect competitors +2. For each competitor: positioning, pricing, strengths, weaknesses +3. Identify the niche opportunity (underserved segment or gap in market) +4. Analyze user reviews/complaints about competitors (inferred from description) +5. Assess market maturity: emerging / growing / mature / declining + +## Rules + +- Base analysis on the project description and prior phase outputs +- Be specific: name real or plausible competitors with real positioning +- Distinguish between direct (same product) and indirect (alternative solutions) competition +- Do NOT pad output with generic statements + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "market_maturity": "growing", + "direct_competitors": [ + { + "name": "CompetitorName", + "positioning": "...", + "pricing": "...", + "strengths": ["..."], + "weaknesses": ["..."] + } + ], + "indirect_competitors": [ + {"name": "...", "why_indirect": "..."} + ], + "niche_opportunity": "Description of the gap or underserved segment", + "differentiation_options": ["..."], + "open_questions": ["Questions that require director input"] +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. +If blocked, include `"blocked_reason": "..."`. 
diff --git a/agents/prompts/marketer.md b/agents/prompts/marketer.md new file mode 100644 index 0000000..7c9f841 --- /dev/null +++ b/agents/prompts/marketer.md @@ -0,0 +1,63 @@ +You are a Marketer for the Kin multi-agent orchestrator. + +Your job: design a go-to-market and growth strategy for a new project. + +## Input + +You receive: +- PROJECT: id, name, description (free-text idea from the director) +- PHASE: phase order in the research pipeline +- TASK BRIEF: {text: , phase: "marketer", workflow: "research"} +- PREVIOUS STEP OUTPUT: output from prior research phases (business, market, UX, etc.) + +## Your responsibilities + +1. Define the positioning statement (for whom, what problem, how different) +2. Propose 3-5 acquisition channels with estimated CAC and effort level +3. Outline SEO strategy: target keywords, content pillars, link building approach +4. Identify conversion optimization patterns (landing page, onboarding, activation) +5. Design a retention loop (notifications, email, community, etc.) +6. Estimate budget ranges for each channel + +## Rules + +- Be specific: real channel names, real keyword examples, realistic CAC estimates +- Prioritize by impact/effort ratio — not everything needs to be done +- Use prior phase outputs (market research, UX) to inform the strategy +- Budget estimates in USD ranges (e.g. "$500-2000/mo") + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "positioning": "For [target], [product] is the [category] that [key benefit] unlike [alternative]", + "acquisition_channels": [ + { + "channel": "SEO", + "estimated_cac": "$5-20", + "effort": "high", + "timeline": "3-6 months", + "priority": 1 + } + ], + "seo_strategy": { + "target_keywords": ["..."], + "content_pillars": ["..."], + "link_building": "..." 
+ }, + "conversion_patterns": ["..."], + "retention_loop": "Description of how users come back", + "budget_estimates": { + "month_1": "$...", + "month_3": "$...", + "month_6": "$..." + }, + "open_questions": ["Questions that require director input"] +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. +If blocked, include `"blocked_reason": "..."`. diff --git a/agents/prompts/pm.md b/agents/prompts/pm.md index 9120f82..2cc40fd 100644 --- a/agents/prompts/pm.md +++ b/agents/prompts/pm.md @@ -5,8 +5,9 @@ Your job: decompose a task into a pipeline of specialist steps. ## Input You receive: -- PROJECT: id, name, tech stack +- PROJECT: id, name, tech stack, project_type (development | operations | research) - TASK: id, title, brief +- ACCEPTANCE CRITERIA: what the task output must satisfy (if provided — use this to verify task completeness, do NOT confuse with current task status) - DECISIONS: known issues, gotchas, workarounds for this project - MODULES: project module map - ACTIVE TASKS: currently in-progress tasks (avoid conflicts) @@ -29,6 +30,52 @@ You receive: - For features: architect first (if complex), then developer, then test + review. - Don't assign specialists who aren't needed. - If a task is blocked or unclear, say so — don't guess. +- If `acceptance_criteria` is provided, include it in the brief for the last pipeline step (tester or reviewer) so they can verify the result against it. Do NOT use acceptance_criteria to describe current task state. 
+ +## Project type routing + +**If project_type == "operations":** +- ONLY use these roles: sysadmin, debugger, reviewer +- NEVER assign: architect, frontend_dev, backend_dev, tester +- Default route for scan/explore tasks: infra_scan (sysadmin → reviewer) +- Default route for incident/debug tasks: infra_debug (sysadmin → debugger → reviewer) +- The sysadmin agent connects via SSH — no local path is available + +**If project_type == "research":** +- Prefer: tech_researcher, architect, reviewer +- No code changes — output is analysis and decisions only + +**If project_type == "development"** (default): +- Full specialist pool available + +## Completion mode selection + +Set `completion_mode` based on the following rules (in priority order): + +1. If `project.execution_mode` is set — use it as the default. +2. Override by `route_type`: + - `debug`, `hotfix`, `feature` → `"auto_complete"` (only if the last pipeline step is `tester` or `reviewer`) + - `research`, `new_project`, `security_audit` → `"review"` +3. Fallback: `"review"` + +## Task categories + +Assign a category based on the nature of the work. 
Choose ONE from this list: + +| Code | Meaning | +|------|---------| +| SEC | Security, auth, permissions | +| UI | Frontend, styles, UX | +| API | Integrations, endpoints, external APIs | +| INFRA| Infrastructure, DevOps, deployment | +| BIZ | Business logic, workflows | +| DB | Database schema, migrations, queries | +| ARCH | Architecture decisions, refactoring | +| TEST | Tests, QA, coverage | +| PERF | Performance optimizations | +| DOCS | Documentation | +| FIX | Hotfixes, bug fixes | +| OBS | Monitoring, observability, logging | ## Output format @@ -37,6 +84,8 @@ Return ONLY valid JSON (no markdown, no explanation): ```json { "analysis": "Brief analysis of what needs to be done", + "completion_mode": "auto_complete", + "category": "FIX", "pipeline": [ { "role": "debugger", @@ -56,3 +105,17 @@ Return ONLY valid JSON (no markdown, no explanation): "route_type": "debug" } ``` + +Valid values for `status`: `"done"`, `"blocked"`. + +If status is "blocked", include `"blocked_reason": "..."` and `"analysis": "..."` explaining why the task cannot be planned. + +## Blocked Protocol + +If you cannot plan the pipeline (task is completely ambiguous, no information to work with, or explicitly outside the system scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess — return blocked immediately. 
diff --git a/agents/prompts/reviewer.md b/agents/prompts/reviewer.md index b638b38..46a358a 100644 --- a/agents/prompts/reviewer.md +++ b/agents/prompts/reviewer.md @@ -7,6 +7,7 @@ Your job: review the implementation for correctness, security, and adherence to You receive: - PROJECT: id, name, path, tech stack - TASK: id, title, brief describing what was built +- ACCEPTANCE CRITERIA: what the task output must satisfy (if provided — verify the implementation meets each criterion before approving) - DECISIONS: project conventions and standards - PREVIOUS STEP OUTPUT: dev agent and/or tester output describing what was changed @@ -35,6 +36,7 @@ You receive: - Check that API endpoints validate input and return proper HTTP status codes. - Check that no secrets, tokens, or credentials are hardcoded. - Do NOT rewrite code — only report findings and recommendations. +- If `acceptance_criteria` is provided, check every criterion explicitly — failing to satisfy any criterion must result in `"changes_requested"`. ## Output format @@ -68,6 +70,16 @@ Valid values for `test_coverage`: `"adequate"`, `"insufficient"`, `"missing"`. If verdict is "changes_requested", findings must be non-empty with actionable suggestions. If verdict is "blocked", include `"blocked_reason": "..."` (e.g. unable to read files). +## Blocked Protocol + +If you cannot perform the review (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "verdict": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially review — return blocked immediately. 
+ ## Output field details **security_issues** and **conventions_violations**: Each array element is an object with the following structure: diff --git a/agents/prompts/security.md b/agents/prompts/security.md index cd8af8d..f92017a 100644 --- a/agents/prompts/security.md +++ b/agents/prompts/security.md @@ -71,3 +71,13 @@ Return ONLY valid JSON: } } ``` + +## Blocked Protocol + +If you cannot perform the audit (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially audit — return blocked immediately. diff --git a/agents/prompts/spec.md b/agents/prompts/spec.md new file mode 100644 index 0000000..8420978 --- /dev/null +++ b/agents/prompts/spec.md @@ -0,0 +1,45 @@ +You are a Specification Agent for a software project. + +Your job: create a detailed feature specification based on the project constitution +(provided as "Previous step output") and the task brief. + +## Your output format (JSON only) + +Return ONLY valid JSON — no markdown, no explanation: + +```json +{ + "overview": "One paragraph summary of what is being built and why", + "features": [ + { + "name": "User Authentication", + "description": "Email + password login with JWT tokens", + "acceptance_criteria": "User can log in, receives token, token expires in 24h" + } + ], + "data_model": [ + { + "entity": "User", + "fields": ["id UUID", "email TEXT UNIQUE", "password_hash TEXT", "created_at DATETIME"] + } + ], + "api_contracts": [ + { + "method": "POST", + "path": "/api/auth/login", + "body": {"email": "string", "password": "string"}, + "response": {"token": "string", "expires_at": "ISO-8601"} + } + ], + "acceptance_criteria": "Full set of acceptance criteria for the entire spec" +} +``` + +## Instructions + +1. The **Previous step output** contains the constitution (principles, constraints, goals) +2. 
Respect ALL constraints from the constitution — do not violate them +3. Design features that advance the stated goals +4. Keep the data model minimal — only what is needed +5. API contracts must be consistent with existing project patterns +6. Acceptance criteria must be testable and specific diff --git a/agents/prompts/sysadmin.md b/agents/prompts/sysadmin.md new file mode 100644 index 0000000..dee56cc --- /dev/null +++ b/agents/prompts/sysadmin.md @@ -0,0 +1,114 @@ +You are a Sysadmin agent for the Kin multi-agent orchestrator. + +Your job: connect to a remote server via SSH, scan it, and produce a structured map of what's running there. + +## Input + +You receive: +- PROJECT: id, name, project_type=operations +- SSH CONNECTION: host, user, key path, optional ProxyJump +- TASK: id, title, brief describing what to scan or investigate +- DECISIONS: known facts and gotchas about this server +- MODULES: existing known components (if any) + +## SSH Command Pattern + +Use the Bash tool to run remote commands. Always use the explicit form: + +``` +ssh -i {KEY} [-J {PROXYJUMP}] -o StrictHostKeyChecking=no -o BatchMode=yes {USER}@{HOST} "command" +``` + +If no key path is provided, omit the `-i` flag and use default SSH auth. +If no ProxyJump is set, omit the `-J` flag. + +**SECURITY: Never use shell=True with user-supplied data. Always pass commands as explicit string arguments to ssh. Never interpolate untrusted input into shell commands.** + +## Scan sequence + +Run these commands one by one. Analyze each result before proceeding: + +1. `uname -a && cat /etc/os-release` — OS version and kernel +2. `docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}'` — running containers +3. `systemctl list-units --state=running --no-pager --plain --type=service 2>/dev/null | head -40` — running services +4. `ss -tlnp 2>/dev/null || netstat -tlnp 2>/dev/null` — open ports +5. 
`find /etc -maxdepth 3 -name "*.conf" -o -name "*.yaml" -o -name "*.yml" -o -name "*.env" 2>/dev/null | head -30` — config files +6. `docker compose ls 2>/dev/null || docker-compose ls 2>/dev/null` — docker-compose projects +7. If docker is present: `docker inspect $(docker ps -q) 2>/dev/null | python3 -c "import json,sys; [print(c['Name'], c.get('HostConfig',{}).get('Binds',[])) for c in json.load(sys.stdin)]" 2>/dev/null` — volume mounts +8. For each key config found — read with `ssh ... "cat /path/to/config"` (skip files with obvious secrets unless needed for the task) +9. `find /opt /home /root /srv -maxdepth 4 -name '.git' -type d 2>/dev/null | head -10` — найти git-репозитории; для каждого: `git -C <path> remote -v && git -C <path> log --oneline -3 2>/dev/null` — remote origin и последние коммиты +10. `ls -la ~/.ssh/ 2>/dev/null && cat ~/.ssh/authorized_keys 2>/dev/null` — список установленных SSH-ключей. Не читать приватные ключи (id_rsa, id_ed25519 без .pub) + +## Rules + +- Run commands one by one — do NOT batch unrelated commands in one ssh call +- Analyze output before next step — skip irrelevant follow-up commands +- If a command fails (permission denied, not found) — note it and continue +- If the task is specific (e.g. 
"find nginx config") — focus on relevant commands only +- Never read files that clearly contain secrets (private keys, .env with passwords) unless the task explicitly requires it +- If SSH connection fails entirely — return status "blocked" with the error + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "summary": "Brief description of what was found", + "os": "Ubuntu 22.04 LTS, kernel 5.15.0", + "services": [ + {"name": "nginx", "type": "systemd", "status": "running", "note": "web proxy"}, + {"name": "myapp", "type": "docker", "image": "myapp:1.2.3", "ports": ["80:8080"]} + ], + "open_ports": [ + {"port": 80, "proto": "tcp", "process": "nginx"}, + {"port": 443, "proto": "tcp", "process": "nginx"}, + {"port": 5432, "proto": "tcp", "process": "postgres"} + ], + "key_configs": [ + {"path": "/etc/nginx/nginx.conf", "note": "main nginx config"}, + {"path": "/opt/myapp/docker-compose.yml", "note": "app stack"} + ], + "versions": { + "docker": "24.0.5", + "nginx": "1.24.0", + "postgres": "15.3" + }, + "decisions": [ + { + "type": "gotcha", + "title": "Brief title of discovered fact", + "description": "Detailed description of the finding", + "tags": ["server", "relevant-tag"] + } + ], + "modules": [ + { + "name": "nginx", + "type": "service", + "path": "/etc/nginx", + "description": "Reverse proxy, serving ports 80/443", + "owner_role": "sysadmin" + } + ], + "git_repos": [ + {"path": "/opt/myapp", "remote": "git@github.com:org/myapp.git", "last_commits": ["abc1234 fix: hotfix", "def5678 feat: new endpoint"]} + ], + "ssh_authorized_keys": [ + "ssh-ed25519 AAAA... user@host", + "ssh-rsa AAAA... 
deploy-key" + ], + "files_read": ["/etc/nginx/nginx.conf"], + "commands_run": ["uname -a", "docker ps"], + "notes": "Any important caveats, things to investigate further, or follow-up tasks needed" +} +``` + +Valid status values: `"done"`, `"partial"` (if some commands failed), `"blocked"` (if SSH connection failed entirely). + +If blocked, include `"blocked_reason": "..."` field. + +The `decisions` array: add entries for every significant discovery — running services, non-standard configs, open ports, version info, gotchas. These will be saved to the project's knowledge base. + +The `modules` array: add one entry per distinct service or component found. These will be registered as project modules. diff --git a/agents/prompts/task_decomposer.md b/agents/prompts/task_decomposer.md new file mode 100644 index 0000000..d3b37a3 --- /dev/null +++ b/agents/prompts/task_decomposer.md @@ -0,0 +1,43 @@ +You are a Task Decomposer Agent for a software project. + +Your job: take an architect's implementation plan (provided as "Previous step output") +and break it down into concrete, actionable implementation tasks. + +## Your output format (JSON only) + +Return ONLY valid JSON — no markdown, no explanation: + +```json +{ + "tasks": [ + { + "title": "Add user_sessions table to core/db.py", + "brief": "Create table with columns: id, user_id, token_hash, expires_at, created_at. Add migration in _migrate().", + "priority": 3, + "category": "DB", + "acceptance_criteria": "Table created in SQLite, migration idempotent, existing DB unaffected" + }, + { + "title": "Implement POST /api/auth/login endpoint", + "brief": "Validate email/password, generate JWT, store session, return token. 
Use bcrypt for password verification.", + "priority": 3, + "category": "API", + "acceptance_criteria": "Returns 200 with token on valid credentials, 401 on invalid, 422 on missing fields" + } + ] +} +``` + +## Valid categories + +DB, API, UI, INFRA, SEC, BIZ, ARCH, TEST, PERF, DOCS, FIX, OBS + +## Instructions + +1. The **Previous step output** contains the architect's implementation plan +2. Create one task per discrete implementation unit (file, function group, endpoint) +3. Tasks should be independent and completable in a single agent session +4. Priority: 1 = critical, 3 = normal, 5 = low +5. Each task must have clear, testable acceptance criteria +6. Do NOT include tasks for writing documentation unless explicitly in the spec +7. Aim for 3-10 tasks — if you need more, group related items diff --git a/agents/prompts/tech_researcher.md b/agents/prompts/tech_researcher.md index b91ed5a..6f58c70 100644 --- a/agents/prompts/tech_researcher.md +++ b/agents/prompts/tech_researcher.md @@ -90,3 +90,13 @@ Valid values for `status`: `"done"`, `"partial"`, `"blocked"`. - `"blocked"` — unable to proceed; include `"blocked_reason": "..."`. If status is "partial", include `"partial_reason": "..."` explaining what was skipped. + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. 
diff --git a/agents/prompts/tester.md b/agents/prompts/tester.md index 3b958f7..b2517f0 100644 --- a/agents/prompts/tester.md +++ b/agents/prompts/tester.md @@ -7,6 +7,7 @@ Your job: write or update tests that verify the implementation is correct and re You receive: - PROJECT: id, name, path, tech stack - TASK: id, title, brief describing what was implemented +- ACCEPTANCE CRITERIA: what the task output must satisfy (if provided — verify tests cover these criteria explicitly) - PREVIOUS STEP OUTPUT: dev agent output describing what was changed (required) ## Your responsibilities @@ -38,6 +39,7 @@ For a specific test file: `python -m pytest tests/test_models.py -v` - One test per behavior — don't combine multiple assertions in one test without clear reason. - Test names must describe the scenario: `test_update_task_sets_updated_at`, not `test_task`. - Do NOT test implementation internals — test observable behavior and return values. +- If `acceptance_criteria` is provided in the task, ensure your tests explicitly verify each criterion. ## Output format @@ -65,3 +67,13 @@ Valid values for `status`: `"passed"`, `"failed"`, `"blocked"`. If status is "failed", populate `"failures"` with `[{"test": "...", "error": "..."}]`. If status is "blocked", include `"blocked_reason": "..."`. + +## Blocked Protocol + +If you cannot perform the task (no file access, ambiguous requirements, task outside your scope), return this JSON **instead of** the normal output: + +```json +{"status": "blocked", "reason": "", "blocked_at": ""} +``` + +Use current datetime for `blocked_at`. Do NOT guess or partially complete — return blocked immediately. diff --git a/agents/prompts/ux_designer.md b/agents/prompts/ux_designer.md new file mode 100644 index 0000000..98c2d7d --- /dev/null +++ b/agents/prompts/ux_designer.md @@ -0,0 +1,57 @@ +You are a UX Designer for the Kin multi-agent orchestrator. + +Your job: analyze UX patterns and design the user experience for a new project. 
+ +## Input + +You receive: +- PROJECT: id, name, description (free-text idea from the director) +- PHASE: phase order in the research pipeline +- TASK BRIEF: {text: , phase: "ux_designer", workflow: "research"} +- PREVIOUS STEP OUTPUT: output from prior research phases (market research, etc.) + +## Your responsibilities + +1. Identify 2-3 user personas with goals, frustrations, and tech savviness +2. Map the primary user journey (5-8 steps: Awareness → Onboarding → Core Value → Retention) +3. Analyze UX patterns from competitors (from market research output if available) +4. Identify the 3 most critical UX risks +5. Propose key screens/flows as text wireframes (ASCII or numbered descriptions) + +## Rules + +- Focus on the most important user flows first — do not over-engineer +- Base competitor UX analysis on prior research phase output +- Wireframes must be text-based (no images), concise, actionable +- Highlight where the UX must differentiate from competitors + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "personas": [ + { + "name": "...", + "role": "...", + "goals": ["..."], + "frustrations": ["..."], + "tech_savviness": "medium" + } + ], + "user_journey": [ + {"step": 1, "name": "Awareness", "action": "...", "emotion": "..."} + ], + "competitor_ux_analysis": "Summary of what competitors do well/poorly", + "ux_risks": ["..."], + "key_screens": [ + {"name": "Onboarding", "wireframe": "Step 1: ... Step 2: ..."} + ], + "open_questions": ["Questions that require director input"] +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. +If blocked, include `"blocked_reason": "..."`. diff --git a/agents/runner.py b/agents/runner.py index 33dffbe..d37300c 100644 --- a/agents/runner.py +++ b/agents/runner.py @@ -4,7 +4,9 @@ Each agent = separate process with isolated context. 
""" import json +import logging import os +import shutil import sqlite3 import subprocess import time @@ -13,11 +15,120 @@ from typing import Any import re +_logger = logging.getLogger("kin.runner") + + +# Extra PATH entries to inject when searching for claude CLI. +# launchctl daemons start with a stripped PATH that may omit these. +_EXTRA_PATH_DIRS = [ + "/opt/homebrew/bin", + "/opt/homebrew/sbin", + "/usr/local/bin", + "/usr/local/sbin", +] + + +def _build_claude_env() -> dict: + """Return an env dict with an extended PATH that includes common CLI tool locations. + + Merges _EXTRA_PATH_DIRS with the current process PATH, deduplicating entries. + Also resolves ~/.nvm/versions/node/*/bin globs that launchctl may not expand. + """ + env = os.environ.copy() + existing = env.get("PATH", "").split(":") + + extra = list(_EXTRA_PATH_DIRS) + + # Expand nvm node bin dirs dynamically + nvm_root = Path.home() / ".nvm" / "versions" / "node" + if nvm_root.is_dir(): + for node_ver in sorted(nvm_root.iterdir(), reverse=True): + bin_dir = node_ver / "bin" + if bin_dir.is_dir(): + extra.append(str(bin_dir)) + + seen: set[str] = set() + deduped: list[str] = [] + for d in extra + existing: + if d and d not in seen: + seen.add(d) + deduped.append(d) + env["PATH"] = ":".join(deduped) + + # Ensure SSH agent is available for agents that connect via SSH. + # Under launchd, SSH_AUTH_SOCK is not inherited — detect macOS system socket. 
+ if "SSH_AUTH_SOCK" not in env: + import glob + socks = glob.glob("/private/tmp/com.apple.launchd.*/Listeners") + if socks: + env["SSH_AUTH_SOCK"] = socks[0] + if "SSH_AGENT_PID" not in env: + pid = os.environ.get("SSH_AGENT_PID") + if pid: + env["SSH_AGENT_PID"] = pid + + return env + + +def _resolve_claude_cmd() -> str: + """Return the full path to the claude CLI, or 'claude' as fallback.""" + extended_env = _build_claude_env() + found = shutil.which("claude", path=extended_env["PATH"]) + return found or "claude" + from core import models from core.context_builder import build_context, format_prompt from core.hooks import run_hooks +class ClaudeAuthError(Exception): + """Raised when Claude CLI is not authenticated or not available.""" + pass + + +def check_claude_auth(timeout: int = 10) -> None: + """Check that claude CLI is authenticated before running a pipeline. + + Runs: claude -p 'ok' --output-format json with timeout. + Returns None if auth is confirmed. + Raises ClaudeAuthError if: + - claude CLI not found in PATH (FileNotFoundError) + - stdout/stderr contains 'not logged in' (case-insensitive) + - returncode != 0 + - is_error=true in parsed JSON output + Returns silently on TimeoutExpired (ambiguous — don't block pipeline). + """ + claude_cmd = _resolve_claude_cmd() + env = _build_claude_env() + try: + proc = subprocess.run( + [claude_cmd, "-p", "ok", "--output-format", "json"], + capture_output=True, + text=True, + timeout=timeout, + env=env, + stdin=subprocess.DEVNULL, + ) + except FileNotFoundError: + raise ClaudeAuthError("claude CLI not found in PATH. Install it or add to PATH.") + except subprocess.TimeoutExpired: + return # Ambiguous — don't block pipeline on timeout + + stdout = proc.stdout or "" + stderr = proc.stderr or "" + combined = stdout + stderr + + if "not logged in" in combined.lower(): + raise ClaudeAuthError("Claude CLI requires login. Run: claude login") + + if proc.returncode != 0: + raise ClaudeAuthError("Claude CLI requires login. 
Run: claude login") + + parsed = _try_parse_json(stdout) + if isinstance(parsed, dict) and parsed.get("is_error"): + raise ClaudeAuthError("Claude CLI requires login. Run: claude login") + + def run_agent( conn: sqlite3.Connection, role: str, @@ -29,6 +140,7 @@ def run_agent( dry_run: bool = False, allow_write: bool = False, noninteractive: bool = False, + working_dir_override: str | None = None, ) -> dict: """Run a single Claude Code agent as a subprocess. @@ -61,7 +173,11 @@ def run_agent( # Determine working directory project = models.get_project(conn, project_id) working_dir = None - if project and role in ("debugger", "frontend_dev", "backend_dev", "tester", "security"): + # Operations projects have no local path — sysadmin works via SSH + is_operations = project and project.get("project_type") == "operations" + if working_dir_override: + working_dir = working_dir_override + elif not is_operations and project and role in ("debugger", "frontend_dev", "backend_dev", "tester", "security", "constitution", "spec", "task_decomposer"): project_path = Path(project["path"]).expanduser() if project_path.is_dir(): working_dir = str(project_path) @@ -116,10 +232,12 @@ def _run_claude( working_dir: str | None = None, allow_write: bool = False, noninteractive: bool = False, + timeout: int | None = None, ) -> dict: """Execute claude CLI as subprocess. 
Returns dict with output, returncode, etc.""" + claude_cmd = _resolve_claude_cmd() cmd = [ - "claude", + claude_cmd, "-p", prompt, "--output-format", "json", "--model", model, @@ -128,7 +246,9 @@ def _run_claude( cmd.append("--dangerously-skip-permissions") is_noninteractive = noninteractive or os.environ.get("KIN_NONINTERACTIVE") == "1" - timeout = 300 if is_noninteractive else 600 + if timeout is None: + timeout = int(os.environ.get("KIN_AGENT_TIMEOUT") or 600) + env = _build_claude_env() try: proc = subprocess.run( @@ -137,6 +257,7 @@ def _run_claude( text=True, timeout=timeout, cwd=working_dir, + env=env, stdin=subprocess.DEVNULL if is_noninteractive else None, ) except FileNotFoundError: @@ -362,6 +483,35 @@ def run_audit( } +# --------------------------------------------------------------------------- +# Blocked protocol detection +# --------------------------------------------------------------------------- + +def _parse_agent_blocked(result: dict) -> dict | None: + """Detect semantic blocked status from a successful agent result. + + Returns dict with {reason, blocked_at} if the agent's top-level JSON + contains status='blocked'. Returns None otherwise. + + Only checks top-level output object — never recurses into nested fields, + to avoid false positives from nested task status fields. 
+ """ + from datetime import datetime + if not result.get("success"): + return None + output = result.get("output") + if not isinstance(output, dict): + return None + # reviewer uses "verdict: blocked"; all others use "status: blocked" + is_blocked = (output.get("status") == "blocked" or output.get("verdict") == "blocked") + if not is_blocked: + return None + return { + "reason": output.get("reason") or output.get("blocked_reason") or "", + "blocked_at": output.get("blocked_at") or datetime.now().isoformat(), + } + + # --------------------------------------------------------------------------- # Permission error detection # --------------------------------------------------------------------------- @@ -377,6 +527,440 @@ def _is_permission_error(result: dict) -> bool: return any(re.search(p, text) for p in PERMISSION_PATTERNS) +# --------------------------------------------------------------------------- +# Autocommit: git add -A && git commit after successful pipeline +# --------------------------------------------------------------------------- + +def _get_changed_files(project_path: str) -> list[str]: + """Return files changed in the current pipeline run. + + Combines unstaged changes, staged changes, and the last commit diff + to cover both autocommit-on and autocommit-off scenarios. + Returns [] on any git error (e.g. no git repo, first commit). 
+ """ + env = _build_claude_env() + git_cmd = shutil.which("git", path=env["PATH"]) or "git" + files: set[str] = set() + for git_args in ( + ["diff", "--name-only"], # unstaged tracked changes + ["diff", "--cached", "--name-only"], # staged changes + ["diff", "HEAD~1", "HEAD", "--name-only"], # last commit (post-autocommit) + ): + try: + r = subprocess.run( + [git_cmd] + git_args, + cwd=project_path, + capture_output=True, + text=True, + timeout=10, + env=env, + ) + if r.returncode == 0: + files.update(f.strip() for f in r.stdout.splitlines() if f.strip()) + except Exception: + pass + return list(files) + + +def _run_autocommit( + conn: sqlite3.Connection, + task_id: str, + project_id: str, +) -> None: + """Auto-commit changes after successful pipeline completion. + + Runs: git add -A && git commit -m 'kin: {task_id} {title}'. + Silently skips if nothing to commit (exit code 1) or project path not found. + Never raises — autocommit errors must never block the pipeline. + Uses stderr=subprocess.DEVNULL per decision #30. 
+ """ + task = models.get_task(conn, task_id) + project = models.get_project(conn, project_id) + if not task or not project: + return + + if not project.get("autocommit_enabled"): + return + + project_path = Path(project["path"]).expanduser() + if not project_path.is_dir(): + return + + working_dir = str(project_path) + env = _build_claude_env() + git_cmd = shutil.which("git", path=env["PATH"]) or "git" + + title = (task.get("title") or "").replace('"', "'").replace("\n", " ").replace("\r", "") + commit_msg = f"kin: {task_id} {title}" + + try: + subprocess.run( + [git_cmd, "add", "-A"], + cwd=working_dir, + env=env, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + result = subprocess.run( + [git_cmd, "commit", "-m", commit_msg], + cwd=working_dir, + env=env, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + if result.returncode == 0: + _logger.info("Autocommit: %s", commit_msg) + else: + _logger.debug("Autocommit: nothing to commit for %s", task_id) + except Exception as exc: + _logger.warning("Autocommit failed for %s: %s", task_id, exc) + + +# --------------------------------------------------------------------------- +# Sysadmin output: save server map to decisions and modules +# --------------------------------------------------------------------------- + +def _save_sysadmin_output( + conn: sqlite3.Connection, + project_id: str, + task_id: str, + result: dict, +) -> dict: + """Parse sysadmin agent JSON output and save decisions/modules to DB. + + Idempotent: add_decision_if_new deduplicates, modules use INSERT OR IGNORE via + add_module which has UNIQUE(project_id, name) — wraps IntegrityError silently. + Returns {decisions_added, decisions_skipped, modules_added, modules_skipped}. 
+ """ + raw = result.get("raw_output") or result.get("output") or "" + if isinstance(raw, (dict, list)): + raw = json.dumps(raw, ensure_ascii=False) + + parsed = _try_parse_json(raw) + if not isinstance(parsed, dict): + return {"decisions_added": 0, "decisions_skipped": 0, "modules_added": 0, "modules_skipped": 0} + + decisions_added = 0 + decisions_skipped = 0 + for item in (parsed.get("decisions") or []): + if not isinstance(item, dict): + continue + d_type = item.get("type", "decision") + if d_type not in VALID_DECISION_TYPES: + d_type = "decision" + d_title = (item.get("title") or "").strip() + d_desc = (item.get("description") or "").strip() + if not d_title or not d_desc: + continue + saved = models.add_decision_if_new( + conn, + project_id=project_id, + type=d_type, + title=d_title, + description=d_desc, + tags=item.get("tags") or ["server"], + task_id=task_id, + ) + if saved: + decisions_added += 1 + else: + decisions_skipped += 1 + + modules_added = 0 + modules_skipped = 0 + for item in (parsed.get("modules") or []): + if not isinstance(item, dict): + continue + m_name = (item.get("name") or "").strip() + m_type = (item.get("type") or "service").strip() + m_path = (item.get("path") or "").strip() + if not m_name: + continue + try: + m = models.add_module( + conn, + project_id=project_id, + name=m_name, + type=m_type, + path=m_path or m_name, + description=item.get("description"), + owner_role="sysadmin", + ) + if m.get("_created", True): + modules_added += 1 + else: + modules_skipped += 1 + except Exception: + modules_skipped += 1 + + return { + "decisions_added": decisions_added, + "decisions_skipped": decisions_skipped, + "modules_added": modules_added, + "modules_skipped": modules_skipped, + } + + +# --------------------------------------------------------------------------- +# Auto-test: detect test failure in agent output +# --------------------------------------------------------------------------- + +_TEST_FAILURE_PATTERNS = [ + r"\bFAILED\b", + 
r"\bFAIL\b", + r"\d+\s+failed", + r"test(?:s)?\s+failed", + r"assert(?:ion)?\s*(error|failed)", + r"exception(?:s)?\s+occurred", + r"returncode\s*[!=]=\s*0", + r"Error:\s", + r"ERRORS?\b", +] + +_TEST_SUCCESS_PATTERNS = [ + r"no\s+failures", + r"all\s+tests?\s+pass", + r"0\s+failed", + r"passed.*no\s+errors", +] + + +def _is_test_failure(result: dict) -> bool: + """Return True if agent output indicates test failures. + + Checks for failure keywords, guards against false positives from + explicit success phrases (e.g. 'no failures'). + """ + output = result.get("raw_output") or result.get("output") or "" + if not isinstance(output, str): + output = json.dumps(output, ensure_ascii=False) + + for p in _TEST_SUCCESS_PATTERNS: + if re.search(p, output, re.IGNORECASE): + return False + + for p in _TEST_FAILURE_PATTERNS: + if re.search(p, output, re.IGNORECASE): + return True + + return False + + +# --------------------------------------------------------------------------- +# Auto-test runner: run project tests via `make test` +# --------------------------------------------------------------------------- + +# Roles that trigger auto-test when project.auto_test_enabled is set +_AUTO_TEST_ROLES = {"backend_dev", "frontend_dev"} + + +def _run_project_tests(project_path: str, timeout: int = 120) -> dict: + """Run `make test` in project_path. Returns {success, output, returncode}. + + Never raises — all errors are captured and returned in output. 
+ """ + env = _build_claude_env() + make_cmd = shutil.which("make", path=env["PATH"]) or "make" + try: + result = subprocess.run( + [make_cmd, "test"], + cwd=project_path, + capture_output=True, + text=True, + timeout=timeout, + env=env, + ) + output = (result.stdout or "") + (result.stderr or "") + return {"success": result.returncode == 0, "output": output, "returncode": result.returncode} + except subprocess.TimeoutExpired: + return {"success": False, "output": f"make test timed out after {timeout}s", "returncode": 124} + except FileNotFoundError: + return {"success": False, "output": "make not found — no Makefile or make not in PATH", "returncode": 127} + except Exception as exc: + return {"success": False, "output": f"Test run error: {exc}", "returncode": -1} + + +# --------------------------------------------------------------------------- +# Decomposer output: create child tasks from task_decomposer JSON +# --------------------------------------------------------------------------- + +def _save_decomposer_output( + conn: sqlite3.Connection, + project_id: str, + parent_task_id: str, + result: dict, +) -> dict: + """Parse task_decomposer output and create child tasks in DB. + + Expected output format: {tasks: [{title, brief, priority, category, acceptance_criteria}]} + Idempotent: skips tasks with same parent_task_id + title (case-insensitive). + Returns {created: int, skipped: int}. 
+ """ + raw = result.get("raw_output") or result.get("output") or "" + if isinstance(raw, (dict, list)): + raw = json.dumps(raw, ensure_ascii=False) + + parsed = _try_parse_json(raw) + if not isinstance(parsed, dict): + return {"created": 0, "skipped": 0, "error": "non-JSON decomposer output"} + + task_list = parsed.get("tasks", []) + if not isinstance(task_list, list): + return {"created": 0, "skipped": 0, "error": "invalid tasks format"} + + created = 0 + skipped = 0 + for item in task_list: + if not isinstance(item, dict): + continue + title = (item.get("title") or "").strip() + if not title: + continue + # Idempotency: skip if same parent + title already exists + existing = conn.execute( + """SELECT id FROM tasks + WHERE parent_task_id = ? AND lower(trim(title)) = lower(trim(?))""", + (parent_task_id, title), + ).fetchone() + if existing: + skipped += 1 + continue + category = (item.get("category") or "").strip().upper() + if category not in models.TASK_CATEGORIES: + category = None + task_id = models.next_task_id(conn, project_id, category=category) + brief_text = item.get("brief") or "" + models.create_task( + conn, + task_id, + project_id, + title, + priority=item.get("priority", 5), + brief={"text": brief_text, "source": f"decomposer:{parent_task_id}"}, + category=category, + acceptance_criteria=item.get("acceptance_criteria"), + parent_task_id=parent_task_id, + ) + created += 1 + + return {"created": created, "skipped": skipped} + + +# --------------------------------------------------------------------------- +# Auto-learning: extract decisions from pipeline results +# --------------------------------------------------------------------------- + +VALID_DECISION_TYPES = {"decision", "gotcha", "convention"} + +def _run_learning_extraction( + conn: sqlite3.Connection, + task_id: str, + project_id: str, + step_results: list[dict], +) -> dict: + """Extract and save decisions from completed pipeline results. 
+ + Calls the learner agent with step outputs + existing decisions, + parses the JSON response, and saves new decisions via add_decision_if_new. + Returns a summary dict with added/skipped counts. + """ + learner_prompt_path = PROMPTS_DIR / "learner.md" + if not learner_prompt_path.exists(): + return {"added": 0, "skipped": 0, "error": "learner.md not found"} + + template = learner_prompt_path.read_text() + + # Summarize step outputs (first 2000 chars each) + step_summaries = {} + for r in step_results: + role = r.get("role", "unknown") + output = r.get("raw_output") or r.get("output") or "" + if isinstance(output, (dict, list)): + output = json.dumps(output, ensure_ascii=False) + step_summaries[role] = output[:2000] + + # Fetch existing decisions for dedup hint + existing = models.get_decisions(conn, project_id) + existing_hints = [ + {"title": d["title"], "type": d["type"]} + for d in existing + ] + + prompt_parts = [ + template, + "", + "## PIPELINE_OUTPUTS", + json.dumps(step_summaries, ensure_ascii=False, indent=2), + "", + "## EXISTING_DECISIONS", + json.dumps(existing_hints, ensure_ascii=False, indent=2), + ] + prompt = "\n".join(prompt_parts) + + learner_timeout = int(os.environ.get("KIN_LEARNER_TIMEOUT") or 120) + start = time.monotonic() + result = _run_claude(prompt, model="sonnet", noninteractive=True, timeout=learner_timeout) + duration = int(time.monotonic() - start) + + raw_output = result.get("output", "") + if not isinstance(raw_output, str): + raw_output = json.dumps(raw_output, ensure_ascii=False) + success = result["returncode"] == 0 + + # Log to agent_logs + models.log_agent_run( + conn, + project_id=project_id, + task_id=task_id, + agent_role="learner", + action="learn", + input_summary=f"project={project_id}, task={task_id}, steps={len(step_results)}", + output_summary=raw_output or None, + tokens_used=result.get("tokens_used"), + model="sonnet", + cost_usd=result.get("cost_usd"), + success=success, + error_message=result.get("error") if not 
success else None, + duration_seconds=duration, + ) + + parsed = _try_parse_json(raw_output) + if not isinstance(parsed, dict): + return {"added": 0, "skipped": 0, "error": "non-JSON learner output"} + + decisions = parsed.get("decisions", []) + if not isinstance(decisions, list): + return {"added": 0, "skipped": 0, "error": "invalid decisions format"} + + added = 0 + skipped = 0 + for item in decisions[:5]: + if not isinstance(item, dict): + continue + d_type = item.get("type", "decision") + if d_type not in VALID_DECISION_TYPES: + d_type = "decision" + d_title = (item.get("title") or "").strip() + d_desc = (item.get("description") or "").strip() + if not d_title or not d_desc: + continue + saved = models.add_decision_if_new( + conn, + project_id=project_id, + type=d_type, + title=d_title, + description=d_desc, + tags=item.get("tags") or [], + task_id=task_id, + ) + if saved: + added += 1 + else: + skipped += 1 + + return {"added": added, "skipped": skipped} + + # --------------------------------------------------------------------------- # Pipeline executor # --------------------------------------------------------------------------- @@ -398,6 +982,18 @@ def run_pipeline( Returns {success, steps_completed, total_cost, total_tokens, total_duration, results} """ + # Auth check — skip for dry_run (dry_run never calls claude CLI) + if not dry_run: + try: + check_claude_auth() + except ClaudeAuthError as exc: + return { + "success": False, + "error": "claude_auth_required", + "message": str(exc), + "instructions": "Run: claude login", + } + task = models.get_task(conn, task_id) if not task: return {"success": False, "error": f"Task '{task_id}' not found"} @@ -431,6 +1027,26 @@ def run_pipeline( model = step.get("model", "sonnet") brief = step.get("brief") + # Worktree isolation: opt-in per project, for write-capable roles + _WORKTREE_ROLES = {"backend_dev", "frontend_dev", "debugger"} + worktree_path = None + project_for_wt = models.get_project(conn, 
task["project_id"]) if not dry_run else None + use_worktree = ( + not dry_run + and role in _WORKTREE_ROLES + and project_for_wt + and project_for_wt.get("worktrees_enabled") + and project_for_wt.get("path") + ) + if use_worktree: + try: + from core.worktree import create_worktree, ensure_gitignore + p_path = str(Path(project_for_wt["path"]).expanduser()) + ensure_gitignore(p_path) + worktree_path = create_worktree(p_path, task_id, role) + except Exception: + worktree_path = None # Fall back to normal execution + try: result = run_agent( conn, role, task_id, project_id, @@ -440,6 +1056,7 @@ def run_pipeline( dry_run=dry_run, allow_write=allow_write, noninteractive=noninteractive, + working_dir_override=worktree_path, ) except Exception as exc: exc_msg = f"Step {i+1}/{len(steps)} ({role}) raised exception: {exc}" @@ -463,6 +1080,21 @@ def run_pipeline( error_message=exc_msg, ) models.update_task(conn, task_id, status="blocked", blocked_reason=exc_msg) + try: + from core.telegram import send_telegram_escalation + project = models.get_project(conn, project_id) + project_name = project["name"] if project else project_id + sent = send_telegram_escalation( + task_id=task_id, + project_name=project_name, + agent_role=role, + reason=exc_msg, + pipeline_step=str(i + 1), + ) + if sent: + models.mark_telegram_sent(conn, task_id) + except Exception: + pass # Telegram errors must never block pipeline return { "success": False, "error": exc_msg, @@ -485,7 +1117,7 @@ def run_pipeline( if not result["success"]: # Auto mode: retry once with allow_write on permission error - if mode == "auto" and not allow_write and _is_permission_error(result): + if mode == "auto_complete" and not allow_write and _is_permission_error(result): task_modules = models.get_modules(conn, project_id) try: run_hooks(conn, project_id, task_id, @@ -493,6 +1125,19 @@ def run_pipeline( task_modules=task_modules) except Exception: pass + # Audit log: record dangerous skip before retry + try: + 
models.log_audit_event( + conn, + event_type="dangerous_skip", + task_id=task_id, + step_id=role, + reason=f"auto mode permission retry: step {i+1}/{len(steps)} ({role})", + project_id=project_id, + ) + models.update_task(conn, task_id, dangerously_skipped=1) + except Exception: + pass retry = run_agent( conn, role, task_id, project_id, model=model, @@ -536,8 +1181,232 @@ def run_pipeline( "pipeline_id": pipeline["id"] if pipeline else None, } + # Worktree merge/cleanup after successful step + if worktree_path and result["success"] and not dry_run: + try: + from core.worktree import merge_worktree, cleanup_worktree + p_path = str(Path(project_for_wt["path"]).expanduser()) + merge_result = merge_worktree(worktree_path, p_path) + if not merge_result["success"]: + conflicts = merge_result.get("conflicts", []) + conflict_msg = f"Worktree merge conflict in files: {', '.join(conflicts)}" if conflicts else "Worktree merge failed" + models.update_task(conn, task_id, status="blocked", blocked_reason=conflict_msg) + cleanup_worktree(worktree_path, p_path) + if pipeline: + models.update_pipeline(conn, pipeline["id"], status="failed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration) + return { + "success": False, + "error": conflict_msg, + "steps_completed": i, + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, + "pipeline_id": pipeline["id"] if pipeline else None, + } + cleanup_worktree(worktree_path, p_path) + except Exception: + pass # Worktree errors must never block pipeline + elif worktree_path and not dry_run: + # Step failed — cleanup worktree without merging + try: + from core.worktree import cleanup_worktree + p_path = str(Path(project_for_wt["path"]).expanduser()) + cleanup_worktree(worktree_path, p_path) + except Exception: + pass + results.append(result) + # Semantic blocked: agent ran successfully but returned status='blocked' + 
blocked_info = _parse_agent_blocked(result) + if blocked_info: + if pipeline: + models.update_pipeline( + conn, pipeline["id"], + status="failed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration, + ) + models.update_task( + conn, task_id, + status="blocked", + blocked_reason=blocked_info["reason"], + blocked_at=blocked_info["blocked_at"], + blocked_agent_role=role, + blocked_pipeline_step=str(i + 1), + ) + try: + from core.telegram import send_telegram_escalation + project = models.get_project(conn, project_id) + project_name = project["name"] if project else project_id + sent = send_telegram_escalation( + task_id=task_id, + project_name=project_name, + agent_role=role, + reason=blocked_info["reason"], + pipeline_step=str(i + 1), + ) + if sent: + models.mark_telegram_sent(conn, task_id) + except Exception: + pass # Telegram errors must never block pipeline + error_msg = f"Step {i+1}/{len(steps)} ({role}) blocked: {blocked_info['reason']}" + return { + "success": False, + "error": error_msg, + "blocked_by": role, + "blocked_reason": blocked_info["reason"], + "steps_completed": i, + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, + "pipeline_id": pipeline["id"] if pipeline else None, + } + + # Save sysadmin scan results immediately after a successful sysadmin step + if role == "sysadmin" and result["success"] and not dry_run: + try: + _save_sysadmin_output(conn, project_id, task_id, result) + except Exception: + pass # Never block pipeline on sysadmin save errors + + # Save decomposer output: create child tasks from task_decomposer JSON + if role == "task_decomposer" and result["success"] and not dry_run: + try: + _save_decomposer_output(conn, project_id, task_id, result) + except Exception: + pass # Never block pipeline on decomposer save errors + + # Project-level auto-test: run `make test` after backend_dev/frontend_dev steps. 
+ # Enabled per project via auto_test_enabled flag (opt-in). + # On failure, loop fixer up to KIN_AUTO_TEST_MAX_ATTEMPTS times, then block. + if ( + not dry_run + and role in _AUTO_TEST_ROLES + and result["success"] + and project_for_wt + and project_for_wt.get("auto_test_enabled") + and project_for_wt.get("path") + ): + max_auto_test_attempts = int(os.environ.get("KIN_AUTO_TEST_MAX_ATTEMPTS") or 3) + p_path_str = str(Path(project_for_wt["path"]).expanduser()) + test_run = _run_project_tests(p_path_str) + results.append({"role": "_auto_test", "success": test_run["success"], + "output": test_run["output"], "_project_test": True}) + auto_test_attempt = 0 + while not test_run["success"] and auto_test_attempt < max_auto_test_attempts: + auto_test_attempt += 1 + fix_context = ( + f"Automated project test run (make test) failed after your changes.\n" + f"Test output:\n{test_run['output'][:4000]}\n" + f"Fix the failing tests. Do NOT modify test files." + ) + fix_result = run_agent( + conn, role, task_id, project_id, + model=model, + previous_output=fix_context, + dry_run=False, + allow_write=allow_write, + noninteractive=noninteractive, + ) + total_cost += fix_result.get("cost_usd") or 0 + total_tokens += fix_result.get("tokens_used") or 0 + total_duration += fix_result.get("duration_seconds") or 0 + results.append({**fix_result, "_auto_test_fix_attempt": auto_test_attempt}) + test_run = _run_project_tests(p_path_str) + results.append({"role": "_auto_test", "success": test_run["success"], + "output": test_run["output"], "_project_test": True, + "_attempt": auto_test_attempt}) + if not test_run["success"]: + block_reason = ( + f"Auto-test (make test) failed after {auto_test_attempt} fix attempt(s). 
" + f"Last output: {test_run['output'][:500]}" + ) + models.update_task(conn, task_id, status="blocked", blocked_reason=block_reason) + if pipeline: + models.update_pipeline(conn, pipeline["id"], status="failed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration) + return { + "success": False, + "error": block_reason, + "steps_completed": i, + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, + "pipeline_id": pipeline["id"] if pipeline else None, + } + + # Auto-test loop: if tester step has auto_fix=true and tests failed, + # call fix_role agent and re-run tester up to max_attempts times. + if ( + not dry_run + and step.get("auto_fix") + and role == "tester" + and result["success"] + and _is_test_failure(result) + ): + max_attempts = int(step.get("max_attempts", 3)) + fix_role = step.get("fix_role", "backend_dev") + fix_model = step.get("fix_model", model) + attempt = 0 + while attempt < max_attempts and _is_test_failure(result): + attempt += 1 + tester_output = result.get("raw_output") or result.get("output") or "" + if isinstance(tester_output, (dict, list)): + tester_output = json.dumps(tester_output, ensure_ascii=False) + + # Run fixer + fix_result = run_agent( + conn, fix_role, task_id, project_id, + model=fix_model, + previous_output=tester_output, + dry_run=False, + allow_write=allow_write, + noninteractive=noninteractive, + ) + total_cost += fix_result.get("cost_usd") or 0 + total_tokens += fix_result.get("tokens_used") or 0 + total_duration += fix_result.get("duration_seconds") or 0 + results.append({**fix_result, "_auto_fix_attempt": attempt}) + + # Re-run tester + fix_output = fix_result.get("raw_output") or fix_result.get("output") or "" + if isinstance(fix_output, (dict, list)): + fix_output = json.dumps(fix_output, ensure_ascii=False) + retest = run_agent( + conn, role, task_id, project_id, + model=model, + 
previous_output=fix_output, + dry_run=False, + allow_write=allow_write, + noninteractive=noninteractive, + ) + total_cost += retest.get("cost_usd") or 0 + total_tokens += retest.get("tokens_used") or 0 + total_duration += retest.get("duration_seconds") or 0 + result = retest + results.append({**result, "_auto_retest_attempt": attempt}) + + # Save final test result regardless of outcome + try: + final_output = result.get("raw_output") or result.get("output") or "" + models.update_task(conn, task_id, test_result={ + "output": final_output if isinstance(final_output, str) else str(final_output), + "auto_fix_attempts": attempt, + "passed": not _is_test_failure(result), + }) + except Exception: + pass + # Chain output to next step previous_output = result.get("raw_output") or result.get("output") if isinstance(previous_output, (dict, list)): @@ -555,8 +1424,25 @@ def run_pipeline( task_modules = models.get_modules(conn, project_id) - if mode == "auto": - # Auto mode: skip review, approve immediately + # Compute changed files for hook filtering (frontend build trigger) + changed_files: list[str] | None = None + project = models.get_project(conn, project_id) + if project and project.get("path"): + p_path = Path(project["path"]).expanduser() + if p_path.is_dir(): + changed_files = _get_changed_files(str(p_path)) + + last_role = steps[-1].get("role", "") if steps else "" + auto_eligible = last_role in {"tester", "reviewer"} + + # Guard: re-fetch current status — user may have manually changed it while pipeline ran + current_task = models.get_task(conn, task_id) + current_status = current_task.get("status") if current_task else None + + if current_status in ("done", "cancelled"): + pass # User finished manually — don't overwrite + elif mode == "auto_complete" and auto_eligible: + # Auto-complete mode: last step is tester/reviewer — skip review, approve immediately models.update_task(conn, task_id, status="done") try: run_hooks(conn, project_id, task_id, @@ -586,15 +1472,29 
@@ def run_pipeline( pass else: # Review mode: wait for manual approval - models.update_task(conn, task_id, status="review") + models.update_task(conn, task_id, status="review", execution_mode="review") # Run post-pipeline hooks (failures don't affect pipeline status) try: run_hooks(conn, project_id, task_id, - event="pipeline_completed", task_modules=task_modules) + event="pipeline_completed", task_modules=task_modules, + changed_files=changed_files) except Exception: pass # Hook errors must never block pipeline completion + # Auto-learning: extract decisions from pipeline results + if results: + try: + _run_learning_extraction(conn, task_id, project_id, results) + except Exception: + pass # Learning errors must never block pipeline completion + + # Auto-commit changes after successful pipeline + try: + _run_autocommit(conn, task_id, project_id) + except Exception: + pass # Autocommit errors must never block pipeline completion + return { "success": True, "steps_completed": len(steps), diff --git a/agents/specialists.yaml b/agents/specialists.yaml index 0a7963a..6c8f07c 100644 --- a/agents/specialists.yaml +++ b/agents/specialists.yaml @@ -81,6 +81,16 @@ specialists: context_rules: decisions_category: security + sysadmin: + name: "Sysadmin" + model: sonnet + tools: [Bash, Read] + description: "SSH-based server scanner: maps running services, open ports, configs, versions via remote commands" + permissions: read_bash + context_rules: + decisions: all + modules: all + tech_researcher: name: "Tech Researcher" model: sonnet @@ -101,6 +111,46 @@ specialists: codebase_diff: "array of { file, line_hint, issue, suggestion }" notes: string + constitution: + name: "Constitution Agent" + model: sonnet + tools: [Read, Grep, Glob] + description: "Defines project principles, constraints, and non-negotiables. First step in spec-driven workflow." 
+ permissions: read_only + context_rules: + decisions: all + output_schema: + principles: "array of strings" + constraints: "array of strings" + goals: "array of strings" + + spec: + name: "Spec Agent" + model: sonnet + tools: [Read, Grep, Glob] + description: "Creates detailed feature specification from constitution output. Second step in spec-driven workflow." + permissions: read_only + context_rules: + decisions: all + output_schema: + overview: string + features: "array of { name, description, acceptance_criteria }" + data_model: "array of { entity, fields }" + api_contracts: "array of { method, path, body, response }" + acceptance_criteria: string + + task_decomposer: + name: "Task Decomposer" + model: sonnet + tools: [Read, Grep, Glob] + description: "Decomposes architect output into concrete implementation tasks. Creates child tasks in DB." + permissions: read_only + context_rules: + decisions: all + modules: all + output_schema: + tasks: "array of { title, brief, priority, category, acceptance_criteria }" + # Route templates — PM uses these to build pipelines routes: debug: @@ -126,3 +176,15 @@ routes: api_research: steps: [tech_researcher, architect] description: "Study external API → integration plan" + + infra_scan: + steps: [sysadmin, reviewer] + description: "SSH scan server → map services/ports/configs → review findings" + + infra_debug: + steps: [sysadmin, debugger, reviewer] + description: "SSH diagnose → find root cause → verify fix plan" + + spec_driven: + steps: [constitution, spec, architect, task_decomposer] + description: "Constitution → spec → implementation plan → decompose into tasks" diff --git a/cli/main.py b/cli/main.py index bc4ba61..bde03da 100644 --- a/cli/main.py +++ b/cli/main.py @@ -53,21 +53,6 @@ def _table(headers: list[str], rows: list[list[str]], min_width: int = 6): return "\n".join(lines) -def _auto_task_id(conn, project_id: str) -> str: - """Generate next task ID like PROJ-001.""" - prefix = project_id.upper() - existing = 
models.list_tasks(conn, project_id=project_id) - max_num = 0 - for t in existing: - tid = t["id"] - if tid.startswith(prefix + "-"): - try: - num = int(tid.split("-", 1)[1]) - max_num = max(max_num, num) - except ValueError: - pass - return f"{prefix}-{max_num + 1:03d}" - # =========================================================================== # Root group @@ -111,6 +96,74 @@ def project_add(ctx, id, name, path, tech_stack, status, priority, language): click.echo(f"Created project: {p['id']} ({p['name']})") +@cli.command("new-project") +@click.argument("description") +@click.option("--id", "project_id", required=True, help="Project ID") +@click.option("--name", required=True, help="Project name") +@click.option("--path", required=True, help="Project path") +@click.option("--roles", default="business,market,tech", show_default=True, + help="Comma-separated roles: business,market,legal,tech,ux,marketer") +@click.option("--tech-stack", default=None, help="Comma-separated tech stack") +@click.option("--priority", type=int, default=5, show_default=True) +@click.option("--language", default="ru", show_default=True) +@click.pass_context +def new_project(ctx, description, project_id, name, path, roles, tech_stack, priority, language): + """Create a new project with a sequential research phase pipeline. + + DESCRIPTION — free-text project description for the agents. + + Role aliases: business=business_analyst, market=market_researcher, + legal=legal_researcher, tech=tech_researcher, ux=ux_designer, marketer=marketer. + Architect is added automatically as the last phase. 
+ """ + from core.phases import create_project_with_phases, validate_roles, ROLE_LABELS + + _ALIASES = { + "business": "business_analyst", + "market": "market_researcher", + "legal": "legal_researcher", + "tech": "tech_researcher", + "ux": "ux_designer", + } + + raw_roles = [r.strip().lower() for r in roles.split(",") if r.strip()] + expanded = [_ALIASES.get(r, r) for r in raw_roles] + clean_roles = validate_roles(expanded) + if not clean_roles: + click.echo("Error: no valid research roles specified.", err=True) + raise SystemExit(1) + + ts = [s.strip() for s in tech_stack.split(",") if s.strip()] if tech_stack else None + conn = ctx.obj["conn"] + + if models.get_project(conn, project_id): + click.echo(f"Error: project '{project_id}' already exists.", err=True) + raise SystemExit(1) + + try: + result = create_project_with_phases( + conn, project_id, name, path, + description=description, + selected_roles=clean_roles, + tech_stack=ts, + priority=priority, + language=language, + ) + except ValueError as e: + click.echo(f"Error: {e}", err=True) + raise SystemExit(1) + + click.echo(f"Created project: {result['project']['id']} ({result['project']['name']})") + click.echo(f"Description: {description}") + click.echo("") + phases = result["phases"] + rows = [ + [str(p["id"]), str(p["phase_order"] + 1), p["role"], p["status"], p.get("task_id") or "—"] + for p in phases + ] + click.echo(_table(["ID", "#", "Role", "Status", "Task"], rows)) + + @project.command("list") @click.option("--status", default=None) @click.pass_context @@ -178,18 +231,28 @@ def task(): @click.argument("title") @click.option("--type", "route_type", type=click.Choice(["debug", "feature", "refactor", "hotfix"]), default=None) @click.option("--priority", type=int, default=5) +@click.option("--category", "-c", default=None, + help=f"Task category: {', '.join(models.TASK_CATEGORIES)}") @click.pass_context -def task_add(ctx, project_id, title, route_type, priority): - """Add a task to a project. 
ID is auto-generated (PROJ-001).""" +def task_add(ctx, project_id, title, route_type, priority, category): + """Add a task to a project. ID is auto-generated (PROJ-001 or PROJ-CAT-001).""" conn = ctx.obj["conn"] p = models.get_project(conn, project_id) if not p: click.echo(f"Project '{project_id}' not found.", err=True) raise SystemExit(1) - task_id = _auto_task_id(conn, project_id) + if category: + category = category.upper() + if category not in models.TASK_CATEGORIES: + click.echo( + f"Invalid category '{category}'. Must be one of: {', '.join(models.TASK_CATEGORIES)}", + err=True, + ) + raise SystemExit(1) + task_id = models.next_task_id(conn, project_id, category=category) brief = {"route_type": route_type} if route_type else None t = models.create_task(conn, task_id, project_id, title, - priority=priority, brief=brief) + priority=priority, brief=brief, category=category) click.echo(f"Created task: {t['id']} — {t['title']}") @@ -586,6 +649,30 @@ def run_task(ctx, task_id, dry_run, allow_write): pipeline_steps = output["pipeline"] analysis = output.get("analysis", "") + # Save completion_mode from PM output to task (only if not already set by user) + task_current = models.get_task(conn, task_id) + update_fields = {} + if not task_current.get("execution_mode"): + pm_completion_mode = models.validate_completion_mode( + output.get("completion_mode", "review") + ) + update_fields["execution_mode"] = pm_completion_mode + import logging + logging.getLogger("kin").info( + "PM set completion_mode=%s for task %s", pm_completion_mode, task_id + ) + + # Save category from PM output (only if task has no category yet) + if not task_current.get("category"): + pm_category = output.get("category") + if pm_category and isinstance(pm_category, str): + pm_category = pm_category.upper() + if pm_category in models.TASK_CATEGORIES: + update_fields["category"] = pm_category + + if update_fields: + models.update_task(conn, task_id, **update_fields) + click.echo(f"\nAnalysis: 
{analysis}") click.echo(f"Pipeline ({len(pipeline_steps)} steps):") for i, step in enumerate(pipeline_steps, 1): diff --git a/core/chat_intent.py b/core/chat_intent.py new file mode 100644 index 0000000..c92166a --- /dev/null +++ b/core/chat_intent.py @@ -0,0 +1,48 @@ +"""Kin — chat intent classifier (heuristic, no LLM). + +classify_intent(text) → 'task_request' | 'status_query' | 'question' +""" + +import re +from typing import Literal + +_STATUS_PATTERNS = [ + r'что сейчас', + r'в работе', + r'\bстатус\b', + r'список задач', + r'покажи задачи', + r'покажи список', + r'какие задачи', + r'что идёт', + r'что делается', + r'что висит', +] + +_QUESTION_STARTS = ( + 'почему', 'зачем', 'как ', 'что такое', 'что значит', + 'объясни', 'расскажи', 'что делает', 'как работает', + 'в чём', 'когда', 'кто', +) + + +def classify_intent(text: str) -> Literal['task_request', 'status_query', 'question']: + """Classify user message intent. + + Returns: + 'status_query' — user is asking about current project status/tasks + 'question' — user is asking a question (no action implied) + 'task_request' — everything else; default: create a task and run pipeline + """ + lower = text.lower().strip() + + for pattern in _STATUS_PATTERNS: + if re.search(pattern, lower): + return 'status_query' + + if lower.endswith('?'): + for word in _QUESTION_STARTS: + if lower.startswith(word): + return 'question' + + return 'task_request' diff --git a/core/context_builder.py b/core/context_builder.py index fad1313..88e6c9d 100644 --- a/core/context_builder.py +++ b/core/context_builder.py @@ -41,6 +41,22 @@ def build_context( "role": role, } + # Attachments — all roles get them so debugger sees screenshots, UX sees mockups, etc. 
+ # Initialize before conditional to guarantee key presence in ctx (#213) + attachments = models.list_attachments(conn, task_id) + ctx["attachments"] = attachments + + # If task has a revise comment, fetch the last agent output for context + if task and task.get("revise_comment"): + row = conn.execute( + """SELECT output_summary FROM agent_logs + WHERE task_id = ? AND success = 1 + ORDER BY created_at DESC LIMIT 1""", + (task_id,), + ).fetchone() + if row and row["output_summary"]: + ctx["last_agent_output"] = row["output_summary"] + if role == "pm": ctx["modules"] = models.get_modules(conn, project_id) ctx["decisions"] = models.get_decisions(conn, project_id) @@ -73,10 +89,23 @@ def build_context( conn, project_id, types=["convention"], ) + elif role == "sysadmin": + ctx["decisions"] = models.get_decisions(conn, project_id) + ctx["modules"] = models.get_modules(conn, project_id) + elif role == "tester": # Minimal context — just the task spec pass + elif role in ("constitution", "spec"): + ctx["modules"] = models.get_modules(conn, project_id) + ctx["decisions"] = models.get_decisions(conn, project_id) + + elif role == "task_decomposer": + ctx["modules"] = models.get_modules(conn, project_id) + ctx["decisions"] = models.get_decisions(conn, project_id) + ctx["active_tasks"] = models.list_tasks(conn, project_id=project_id, status="in_progress") + elif role == "security": ctx["decisions"] = models.get_decisions( conn, project_id, category="security", @@ -91,7 +120,7 @@ def build_context( def _slim_task(task: dict) -> dict: """Extract only relevant fields from a task for the prompt.""" - return { + result = { "id": task["id"], "title": task["title"], "status": task["status"], @@ -100,17 +129,31 @@ def _slim_task(task: dict) -> dict: "brief": task.get("brief"), "spec": task.get("spec"), } + if task.get("revise_comment"): + result["revise_comment"] = task["revise_comment"] + if task.get("acceptance_criteria"): + result["acceptance_criteria"] = task["acceptance_criteria"] 
+ return result def _slim_project(project: dict) -> dict: """Extract only relevant fields from a project.""" - return { + result = { "id": project["id"], "name": project["name"], "path": project["path"], "tech_stack": project.get("tech_stack"), "language": project.get("language", "ru"), + "execution_mode": project.get("execution_mode"), + "project_type": project.get("project_type", "development"), } + # Include SSH fields for operations projects + if project.get("project_type") == "operations": + result["ssh_host"] = project.get("ssh_host") + result["ssh_user"] = project.get("ssh_user") + result["ssh_key_path"] = project.get("ssh_key_path") + result["ssh_proxy_jump"] = project.get("ssh_proxy_jump") + return result def _extract_module_hint(task: dict | None) -> str | None: @@ -144,6 +187,25 @@ def format_prompt(context: dict, role: str, prompt_template: str | None = None) if proj.get("tech_stack"): sections.append(f"Tech stack: {', '.join(proj['tech_stack'])}") sections.append(f"Path: {proj['path']}") + project_type = proj.get("project_type", "development") + sections.append(f"Project type: {project_type}") + sections.append("") + + # SSH connection info for operations projects + if proj and proj.get("project_type") == "operations": + ssh_host = proj.get("ssh_host") or "" + ssh_user = proj.get("ssh_user") or "" + ssh_key = proj.get("ssh_key_path") or "" + ssh_proxy = proj.get("ssh_proxy_jump") or "" + sections.append("## SSH Connection") + if ssh_host: + sections.append(f"Host: {ssh_host}") + if ssh_user: + sections.append(f"User: {ssh_user}") + if ssh_key: + sections.append(f"Key: {ssh_key}") + if ssh_proxy: + sections.append(f"ProxyJump: {ssh_proxy}") sections.append("") # Task info @@ -157,6 +219,12 @@ def format_prompt(context: dict, role: str, prompt_template: str | None = None) sections.append(f"Spec: {json.dumps(task['spec'], ensure_ascii=False)}") sections.append("") + # Acceptance criteria — shown as a dedicated section so agents use it for completeness 
check + if task and task.get("acceptance_criteria"): + sections.append("## Acceptance Criteria") + sections.append(task["acceptance_criteria"]) + sections.append("") + # Decisions decisions = context.get("decisions") if decisions: @@ -203,6 +271,41 @@ def format_prompt(context: dict, role: str, prompt_template: str | None = None) sections.append(f"## Target module: {hint}") sections.append("") + # Revision context: director's comment + agent's previous output + task = context.get("task") + if task and task.get("revise_comment"): + sections.append("## Director's revision request:") + sections.append(task["revise_comment"]) + sections.append("") + last_output = context.get("last_agent_output") + if last_output: + sections.append("## Your previous output (before revision):") + sections.append(last_output) + sections.append("") + + # Attachments + attachments = context.get("attachments") + if attachments: + sections.append(f"## Attachments ({len(attachments)}):") + for a in attachments: + mime = a.get("mime_type", "") + size = a.get("size", 0) + sections.append(f"- {a['filename']} ({mime}, {size} bytes): {a['path']}") + # Inline content for small text-readable files (<= 32 KB) so PM can use them immediately + _TEXT_TYPES = {"text/", "application/json", "application/xml", "application/yaml"} + _TEXT_EXTS = {".txt", ".md", ".json", ".yaml", ".yml", ".csv", ".log", ".xml", ".toml", ".ini", ".env"} + is_text = ( + any(mime.startswith(t) if t.endswith("/") else mime == t for t in _TEXT_TYPES) + or Path(a["filename"]).suffix.lower() in _TEXT_EXTS + ) + if is_text and 0 < size <= 32 * 1024: + try: + content = Path(a["path"]).read_text(encoding="utf-8", errors="replace") + sections.append(f"```\n{content}\n```") + except Exception: + pass + sections.append("") + # Previous step output (pipeline chaining) prev = context.get("previous_output") if prev: diff --git a/core/db.py b/core/db.py index b91d29c..8bbb5f1 100644 --- a/core/db.py +++ b/core/db.py @@ -13,7 +13,7 @@ SCHEMA = 
""" CREATE TABLE IF NOT EXISTS projects ( id TEXT PRIMARY KEY, name TEXT NOT NULL, - path TEXT NOT NULL, + path TEXT CHECK (path IS NOT NULL OR project_type = 'operations'), tech_stack JSON, status TEXT DEFAULT 'active', priority INTEGER DEFAULT 5, @@ -22,6 +22,17 @@ CREATE TABLE IF NOT EXISTS projects ( forgejo_repo TEXT, language TEXT DEFAULT 'ru', execution_mode TEXT NOT NULL DEFAULT 'review', + deploy_command TEXT, + project_type TEXT DEFAULT 'development', + ssh_host TEXT, + ssh_user TEXT, + ssh_key_path TEXT, + ssh_proxy_jump TEXT, + description TEXT, + autocommit_enabled INTEGER DEFAULT 0, + obsidian_vault_path TEXT, + worktrees_enabled INTEGER DEFAULT 0, + auto_test_enabled INTEGER DEFAULT 0, created_at DATETIME DEFAULT CURRENT_TIMESTAMP ); @@ -42,6 +53,17 @@ CREATE TABLE IF NOT EXISTS tasks ( forgejo_issue_id INTEGER, execution_mode TEXT, blocked_reason TEXT, + blocked_at DATETIME, + blocked_agent_role TEXT, + blocked_pipeline_step TEXT, + dangerously_skipped BOOLEAN DEFAULT 0, + revise_comment TEXT, + revise_count INTEGER DEFAULT 0, + revise_target_role TEXT DEFAULT NULL, + labels JSON, + category TEXT DEFAULT NULL, + telegram_sent BOOLEAN DEFAULT 0, + acceptance_criteria TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ); @@ -91,6 +113,22 @@ CREATE TABLE IF NOT EXISTS modules ( UNIQUE(project_id, name) ); +-- Фазы исследования нового проекта (research workflow KIN-059) +CREATE TABLE IF NOT EXISTS project_phases ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + role TEXT NOT NULL, + phase_order INTEGER NOT NULL, + status TEXT DEFAULT 'pending', + task_id TEXT REFERENCES tasks(id), + revise_count INTEGER DEFAULT 0, + revise_comment TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_phases_project ON project_phases(project_id, phase_order); + -- Pipelines (история запусков) 
CREATE TABLE IF NOT EXISTS pipelines ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -135,6 +173,20 @@ CREATE TABLE IF NOT EXISTS hook_logs ( created_at TEXT DEFAULT (datetime('now')) ); +-- Аудит-лог опасных операций (dangerously-skip-permissions) +CREATE TABLE IF NOT EXISTS audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, + task_id TEXT REFERENCES tasks(id), + step_id TEXT, + event_type TEXT NOT NULL DEFAULT 'dangerous_skip', + reason TEXT, + project_id TEXT REFERENCES projects(id) +); + +CREATE INDEX IF NOT EXISTS idx_audit_log_task ON audit_log(task_id); +CREATE INDEX IF NOT EXISTS idx_audit_log_event ON audit_log(event_type, timestamp); + -- Кросс-проектные зависимости CREATE TABLE IF NOT EXISTS project_links ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -177,6 +229,24 @@ CREATE TABLE IF NOT EXISTS support_bot_config ( escalation_keywords JSON ); +-- Среды развёртывания проекта (prod/dev серверы) +CREATE TABLE IF NOT EXISTS project_environments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER DEFAULT 22, + username TEXT NOT NULL, + auth_type TEXT NOT NULL DEFAULT 'password', + auth_value TEXT, + is_installed INTEGER NOT NULL DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(project_id, name) +); + +CREATE INDEX IF NOT EXISTS idx_environments_project ON project_environments(project_id); + -- Индексы CREATE INDEX IF NOT EXISTS idx_tasks_project_status ON tasks(project_id, status); CREATE INDEX IF NOT EXISTS idx_decisions_project ON decisions(project_id); @@ -185,6 +255,32 @@ CREATE INDEX IF NOT EXISTS idx_agent_logs_project ON agent_logs(project_id, crea CREATE INDEX IF NOT EXISTS idx_agent_logs_cost ON agent_logs(project_id, cost_usd); CREATE INDEX IF NOT EXISTS idx_tickets_project ON support_tickets(project_id, status); CREATE INDEX IF NOT EXISTS 
idx_tickets_client ON support_tickets(client_id); + +-- Чат-сообщения (KIN-OBS-012) +CREATE TABLE IF NOT EXISTS chat_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + role TEXT NOT NULL, + content TEXT NOT NULL, + message_type TEXT DEFAULT 'text', + task_id TEXT REFERENCES tasks(id), + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_chat_messages_project ON chat_messages(project_id, created_at); + +-- Вложения задач (KIN-090) +CREATE TABLE IF NOT EXISTS task_attachments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE, + filename TEXT NOT NULL, + path TEXT NOT NULL, + mime_type TEXT NOT NULL, + size INTEGER NOT NULL, + created_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE INDEX IF NOT EXISTS idx_task_attachments_task ON task_attachments(task_id); """ @@ -216,12 +312,356 @@ def _migrate(conn: sqlite3.Connection): conn.execute("ALTER TABLE tasks ADD COLUMN blocked_reason TEXT") conn.commit() + if "autocommit_enabled" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN autocommit_enabled INTEGER DEFAULT 0") + conn.commit() + + if "dangerously_skipped" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN dangerously_skipped BOOLEAN DEFAULT 0") + conn.commit() + + if "revise_comment" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN revise_comment TEXT") + conn.commit() + + if "category" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN category TEXT DEFAULT NULL") + conn.commit() + + if "blocked_at" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN blocked_at DATETIME") + conn.commit() + if "blocked_agent_role" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN blocked_agent_role TEXT") + conn.commit() + if "blocked_pipeline_step" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN blocked_pipeline_step TEXT") + conn.commit() + + 
if "telegram_sent" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN telegram_sent BOOLEAN DEFAULT 0") + conn.commit() + + if "acceptance_criteria" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN acceptance_criteria TEXT") + conn.commit() + + if "revise_count" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN revise_count INTEGER DEFAULT 0") + conn.commit() + + if "labels" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN labels JSON DEFAULT NULL") + conn.commit() + + if "revise_target_role" not in task_cols: + conn.execute("ALTER TABLE tasks ADD COLUMN revise_target_role TEXT DEFAULT NULL") + conn.commit() + + if "obsidian_vault_path" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN obsidian_vault_path TEXT") + conn.commit() + + if "worktrees_enabled" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN worktrees_enabled INTEGER DEFAULT 0") + conn.commit() + + if "auto_test_enabled" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN auto_test_enabled INTEGER DEFAULT 0") + conn.commit() + + if "deploy_command" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN deploy_command TEXT") + conn.commit() + + if "project_type" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN project_type TEXT DEFAULT 'development'") + conn.commit() + + if "ssh_host" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN ssh_host TEXT") + conn.commit() + + if "ssh_user" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN ssh_user TEXT") + conn.commit() + + if "ssh_key_path" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN ssh_key_path TEXT") + conn.commit() + + if "ssh_proxy_jump" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN ssh_proxy_jump TEXT") + conn.commit() + + if "description" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN description TEXT") + conn.commit() + + # Migrate 
audit_log + project_phases tables + existing_tables = {r[0] for r in conn.execute( + "SELECT name FROM sqlite_master WHERE type='table'" + ).fetchall()} + if "project_environments" not in existing_tables: + conn.executescript(""" + CREATE TABLE IF NOT EXISTS project_environments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER DEFAULT 22, + username TEXT NOT NULL, + auth_type TEXT NOT NULL DEFAULT 'password', + auth_value TEXT, + is_installed INTEGER NOT NULL DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(project_id, name) + ); + CREATE INDEX IF NOT EXISTS idx_environments_project ON project_environments(project_id); + """) + conn.commit() + + # Migrate project_environments: old schema used label/login/credential, + # new schema uses name/username/auth_value (KIN-087 column rename). + env_cols = {r[1] for r in conn.execute("PRAGMA table_info(project_environments)").fetchall()} + if "name" not in env_cols and "label" in env_cols: + conn.executescript(""" + PRAGMA foreign_keys=OFF; + CREATE TABLE project_environments_new ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + name TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER DEFAULT 22, + username TEXT NOT NULL, + auth_type TEXT NOT NULL DEFAULT 'password', + auth_value TEXT, + is_installed INTEGER NOT NULL DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(project_id, name) + ); + INSERT INTO project_environments_new + SELECT id, project_id, label, host, port, login, auth_type, + credential, is_installed, created_at, updated_at + FROM project_environments; + DROP TABLE project_environments; + ALTER TABLE project_environments_new RENAME TO project_environments; + CREATE INDEX IF NOT EXISTS idx_environments_project ON 
project_environments(project_id); + PRAGMA foreign_keys=ON; + """) + conn.commit() + + if "project_phases" not in existing_tables: + conn.executescript(""" + CREATE TABLE IF NOT EXISTS project_phases ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + role TEXT NOT NULL, + phase_order INTEGER NOT NULL, + status TEXT DEFAULT 'pending', + task_id TEXT REFERENCES tasks(id), + revise_count INTEGER DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ); + CREATE INDEX IF NOT EXISTS idx_phases_project ON project_phases(project_id, phase_order); + """) + conn.commit() + # Migrate project_phases columns (table may already exist without revise_comment) + phase_cols = {r[1] for r in conn.execute("PRAGMA table_info(project_phases)").fetchall()} + if "revise_comment" not in phase_cols: + conn.execute("ALTER TABLE project_phases ADD COLUMN revise_comment TEXT") + conn.commit() + + if "audit_log" not in existing_tables: + conn.executescript(""" + CREATE TABLE IF NOT EXISTS audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, + task_id TEXT REFERENCES tasks(id), + step_id TEXT, + event_type TEXT NOT NULL DEFAULT 'dangerous_skip', + reason TEXT, + project_id TEXT REFERENCES projects(id) + ); + CREATE INDEX IF NOT EXISTS idx_audit_log_task ON audit_log(task_id); + CREATE INDEX IF NOT EXISTS idx_audit_log_event ON audit_log(event_type, timestamp); + """) + conn.commit() + + # Migrate columns that must exist before table recreation (KIN-UI-002) + # These columns are referenced in the INSERT SELECT below but were not added + # by any prior ALTER TABLE in this chain — causing OperationalError on minimal schemas. 
+ if "tech_stack" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN tech_stack JSON DEFAULT NULL") + conn.commit() + + if "priority" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN priority INTEGER DEFAULT 5") + conn.commit() + + if "pm_prompt" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN pm_prompt TEXT DEFAULT NULL") + conn.commit() + + if "claude_md_path" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN claude_md_path TEXT DEFAULT NULL") + conn.commit() + + if "forgejo_repo" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN forgejo_repo TEXT DEFAULT NULL") + conn.commit() + + if "created_at" not in proj_cols: + # SQLite ALTER TABLE does not allow non-constant defaults like CURRENT_TIMESTAMP + conn.execute("ALTER TABLE projects ADD COLUMN created_at DATETIME DEFAULT NULL") + conn.commit() + + # Migrate projects.path from NOT NULL to nullable (KIN-ARCH-003) + # SQLite doesn't support ALTER COLUMN, so we recreate the table. 
+ path_col_rows = conn.execute("PRAGMA table_info(projects)").fetchall() + path_col = next((r for r in path_col_rows if r[1] == "path"), None) + if path_col and path_col[3] == 1: # notnull == 1, migration needed + conn.executescript(""" + PRAGMA foreign_keys=OFF; + CREATE TABLE projects_new ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + path TEXT CHECK (path IS NOT NULL OR project_type = 'operations'), + tech_stack JSON, + status TEXT DEFAULT 'active', + priority INTEGER DEFAULT 5, + pm_prompt TEXT, + claude_md_path TEXT, + forgejo_repo TEXT, + language TEXT DEFAULT 'ru', + execution_mode TEXT NOT NULL DEFAULT 'review', + deploy_command TEXT, + project_type TEXT DEFAULT 'development', + ssh_host TEXT, + ssh_user TEXT, + ssh_key_path TEXT, + ssh_proxy_jump TEXT, + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + autocommit_enabled INTEGER DEFAULT 0, + obsidian_vault_path TEXT + ); + INSERT INTO projects_new + SELECT id, name, path, tech_stack, status, priority, + pm_prompt, claude_md_path, forgejo_repo, language, + execution_mode, deploy_command, project_type, + ssh_host, ssh_user, ssh_key_path, ssh_proxy_jump, + description, created_at, autocommit_enabled, obsidian_vault_path + FROM projects; + DROP TABLE projects; + ALTER TABLE projects_new RENAME TO projects; + PRAGMA foreign_keys=ON; + """) + + if "chat_messages" not in existing_tables: + conn.executescript(""" + CREATE TABLE IF NOT EXISTS chat_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + role TEXT NOT NULL, + content TEXT NOT NULL, + message_type TEXT DEFAULT 'text', + task_id TEXT REFERENCES tasks(id), + created_at DATETIME DEFAULT CURRENT_TIMESTAMP + ); + CREATE INDEX IF NOT EXISTS idx_chat_messages_project ON chat_messages(project_id, created_at); + """) + conn.commit() + + if "task_attachments" not in existing_tables: + conn.executescript(""" + CREATE TABLE IF NOT EXISTS task_attachments ( + id INTEGER PRIMARY KEY 
AUTOINCREMENT, + task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE, + filename TEXT NOT NULL, + path TEXT NOT NULL, + mime_type TEXT NOT NULL, + size INTEGER NOT NULL, + created_at TEXT NOT NULL DEFAULT (datetime('now')) + ); + CREATE INDEX IF NOT EXISTS idx_task_attachments_task ON task_attachments(task_id); + """) + conn.commit() + + # Rename legacy 'auto' → 'auto_complete' (KIN-063) + conn.execute( + "UPDATE projects SET execution_mode = 'auto_complete' WHERE execution_mode = 'auto'" + ) + conn.execute( + "UPDATE tasks SET execution_mode = 'auto_complete' WHERE execution_mode = 'auto'" + ) + conn.commit() + + +def _seed_default_hooks(conn: sqlite3.Connection): + """Seed default hooks for the kin project (idempotent). + + Creates rebuild-frontend hook only when: + - project 'kin' exists in the projects table + - the hook doesn't already exist (no duplicate) + + Also updates existing hooks to the correct command/config if outdated. + """ + kin_row = conn.execute( + "SELECT path FROM projects WHERE id = 'kin'" + ).fetchone() + if not kin_row or not kin_row["path"]: + return + + _PROJECT_PATH = kin_row["path"].rstrip("/") + _REBUILD_SCRIPT = f"{_PROJECT_PATH}/scripts/rebuild-frontend.sh" + _REBUILD_TRIGGER = "web/frontend/*" + _REBUILD_WORKDIR = _PROJECT_PATH + + exists = conn.execute( + "SELECT 1 FROM hooks" + " WHERE project_id = 'kin'" + " AND name = 'rebuild-frontend'" + " AND event = 'pipeline_completed'" + ).fetchone() + if not exists: + conn.execute( + """INSERT INTO hooks + (project_id, name, event, trigger_module_path, command, + working_dir, timeout_seconds, enabled) + VALUES ('kin', 'rebuild-frontend', 'pipeline_completed', + ?, ?, ?, 300, 1)""", + (_REBUILD_TRIGGER, _REBUILD_SCRIPT, _REBUILD_WORKDIR), + ) + else: + # Migrate existing hook: set trigger_module_path, correct command, working_dir + conn.execute( + """UPDATE hooks + SET trigger_module_path = ?, + command = ?, + working_dir = ?, + timeout_seconds = 300 + WHERE project_id = 'kin' AND 
name = 'rebuild-frontend'""", + (_REBUILD_TRIGGER, _REBUILD_SCRIPT, _REBUILD_WORKDIR), + ) + conn.commit() + + # Enable autocommit for kin project (opt-in, idempotent) + conn.execute( + "UPDATE projects SET autocommit_enabled=1 WHERE id='kin' AND autocommit_enabled=0" + ) + conn.commit() + def init_db(db_path: Path = DB_PATH) -> sqlite3.Connection: conn = get_connection(db_path) conn.executescript(SCHEMA) conn.commit() _migrate(conn) + _seed_default_hooks(conn) return conn diff --git a/core/followup.py b/core/followup.py index 3a01c23..8129d07 100644 --- a/core/followup.py +++ b/core/followup.py @@ -24,6 +24,15 @@ PERMISSION_PATTERNS = [ ] +def _next_task_id( + conn: sqlite3.Connection, + project_id: str, + category: str | None = None, +) -> str: + """Thin wrapper around models.next_task_id for testability.""" + return models.next_task_id(conn, project_id, category=category) + + def _is_permission_blocked(item: dict) -> bool: """Check if a follow-up item describes a permission/write failure.""" text = f"{item.get('title', '')} {item.get('brief', '')}".lower() @@ -48,21 +57,6 @@ def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str: return "\n".join(parts) -def _next_task_id(conn: sqlite3.Connection, project_id: str) -> str: - """Generate the next sequential task ID for a project.""" - prefix = project_id.upper() - existing = models.list_tasks(conn, project_id=project_id) - max_num = 0 - for t in existing: - tid = t["id"] - if tid.startswith(prefix + "-"): - try: - num = int(tid.split("-", 1)[1]) - max_num = max(max_num, num) - except ValueError: - pass - return f"{prefix}-{max_num + 1:03d}" - def generate_followups( conn: sqlite3.Connection, @@ -154,7 +148,7 @@ def generate_followups( "options": ["rerun", "manual_task", "skip"], }) else: - new_id = _next_task_id(conn, project_id) + new_id = _next_task_id(conn, project_id, category=task.get("category")) brief_dict = {"source": f"followup:{task_id}"} if item.get("type"): brief_dict["route_type"] 
= item["type"] @@ -167,6 +161,7 @@ def generate_followups( priority=item.get("priority", 5), parent_task_id=task_id, brief=brief_dict, + category=task.get("category"), ) created.append(t) @@ -206,8 +201,8 @@ def resolve_pending_action( return None if choice == "manual_task": - new_id = _next_task_id(conn, project_id) - brief_dict = {"source": f"followup:{task_id}"} + new_id = _next_task_id(conn, project_id, category=task.get("category")) + brief_dict = {"source": f"followup:{task_id}", "task_type": "manual_escalation"} if item.get("type"): brief_dict["route_type"] = item["type"] if item.get("brief"): @@ -218,6 +213,7 @@ def resolve_pending_action( priority=item.get("priority", 5), parent_task_id=task_id, brief=brief_dict, + category=task.get("category"), ) if choice == "rerun": diff --git a/core/hooks.py b/core/hooks.py index c68df47..b4adc2b 100644 --- a/core/hooks.py +++ b/core/hooks.py @@ -115,9 +115,14 @@ def run_hooks( task_id: str | None, event: str, task_modules: list[dict], + changed_files: list[str] | None = None, ) -> list[HookResult]: """Run matching hooks for the given event and module list. + If changed_files is provided, trigger_module_path is matched against + the actual git-changed file paths (more precise than task_modules). + Falls back to task_modules matching when changed_files is None. + Never raises — hook failures are logged but don't affect the pipeline. 
""" hooks = get_hooks(conn, project_id, event=event) @@ -125,10 +130,13 @@ def run_hooks( for hook in hooks: if hook["trigger_module_path"] is not None: pattern = hook["trigger_module_path"] - matched = any( - fnmatch.fnmatch(m.get("path", ""), pattern) - for m in task_modules - ) + if changed_files is not None: + matched = any(fnmatch.fnmatch(f, pattern) for f in changed_files) + else: + matched = any( + fnmatch.fnmatch(m.get("path", ""), pattern) + for m in task_modules + ) if not matched: continue diff --git a/core/models.py b/core/models.py index 0a4825b..e06cf9b 100644 --- a/core/models.py +++ b/core/models.py @@ -3,7 +3,9 @@ Kin — data access functions for all tables. Pure functions: (conn, params) → dict | list[dict]. No ORM, no classes. """ +import base64 import json +import os import sqlite3 from datetime import datetime from typing import Any @@ -14,6 +16,20 @@ VALID_TASK_STATUSES = [ "blocked", "decomposed", "cancelled", ] +VALID_COMPLETION_MODES = {"auto_complete", "review"} + +TASK_CATEGORIES = [ + "SEC", "UI", "API", "INFRA", "BIZ", "DB", + "ARCH", "TEST", "PERF", "DOCS", "FIX", "OBS", +] + + +def validate_completion_mode(value: str) -> str: + """Validate completion mode from LLM output. 
Falls back to 'review' if invalid.""" + if value in VALID_COMPLETION_MODES: + return value + return "review" + def _row_to_dict(row: sqlite3.Row | None) -> dict | None: """Convert sqlite3.Row to dict with JSON fields decoded.""" @@ -49,7 +65,7 @@ def create_project( conn: sqlite3.Connection, id: str, name: str, - path: str, + path: str | None = None, tech_stack: list | None = None, status: str = "active", priority: int = 5, @@ -58,14 +74,22 @@ def create_project( forgejo_repo: str | None = None, language: str = "ru", execution_mode: str = "review", + project_type: str = "development", + ssh_host: str | None = None, + ssh_user: str | None = None, + ssh_key_path: str | None = None, + ssh_proxy_jump: str | None = None, + description: str | None = None, ) -> dict: """Create a new project and return it as dict.""" conn.execute( """INSERT INTO projects (id, name, path, tech_stack, status, priority, - pm_prompt, claude_md_path, forgejo_repo, language, execution_mode) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + pm_prompt, claude_md_path, forgejo_repo, language, execution_mode, + project_type, ssh_host, ssh_user, ssh_key_path, ssh_proxy_jump, description) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", (id, name, path, _json_encode(tech_stack), status, priority, - pm_prompt, claude_md_path, forgejo_repo, language, execution_mode), + pm_prompt, claude_md_path, forgejo_repo, language, execution_mode, + project_type, ssh_host, ssh_user, ssh_key_path, ssh_proxy_jump, description), ) conn.commit() return get_project(conn, id) @@ -77,6 +101,16 @@ def get_project(conn: sqlite3.Connection, id: str) -> dict | None: return _row_to_dict(row) +def delete_project(conn: sqlite3.Connection, id: str) -> None: + """Delete a project and all its related data (modules, decisions, tasks, phases).""" + # Delete tables that have FK references to tasks BEFORE deleting tasks + # project_environments must come before tasks (FK on project_id) + for table in ("modules", 
"agent_logs", "decisions", "pipelines", "project_phases", "project_environments", "chat_messages", "tasks"): + conn.execute(f"DELETE FROM {table} WHERE project_id = ?", (id,)) + conn.execute("DELETE FROM projects WHERE id = ?", (id,)) + conn.commit() + + def get_effective_mode(conn: sqlite3.Connection, project_id: str, task_id: str) -> str: """Return effective execution mode: 'auto' or 'review'. @@ -123,6 +157,44 @@ def update_project(conn: sqlite3.Connection, id: str, **fields) -> dict: # Tasks # --------------------------------------------------------------------------- +def next_task_id( + conn: sqlite3.Connection, + project_id: str, + category: str | None = None, +) -> str: + """Generate next task ID. + + Without category: PROJ-001 (backward-compatible old format) + With category: PROJ-CAT-001 (new format, per-category counter) + """ + prefix = project_id.upper() + existing = list_tasks(conn, project_id=project_id) + + if category: + cat_prefix = f"{prefix}-{category}-" + max_num = 0 + for t in existing: + tid = t["id"] + if tid.startswith(cat_prefix): + try: + max_num = max(max_num, int(tid[len(cat_prefix):])) + except ValueError: + pass + return f"{prefix}-{category}-{max_num + 1:03d}" + else: + # Old format: global max across project (integers only, skip CAT-NNN) + max_num = 0 + for t in existing: + tid = t["id"] + if tid.startswith(prefix + "-"): + suffix = tid[len(prefix) + 1:] + try: + max_num = max(max_num, int(suffix)) + except ValueError: + pass + return f"{prefix}-{max_num + 1:03d}" + + def create_task( conn: sqlite3.Connection, id: str, @@ -136,16 +208,20 @@ def create_task( spec: dict | None = None, forgejo_issue_id: int | None = None, execution_mode: str | None = None, + category: str | None = None, + acceptance_criteria: str | None = None, + labels: list | None = None, ) -> dict: """Create a task linked to a project.""" conn.execute( """INSERT INTO tasks (id, project_id, title, status, priority, assigned_role, parent_task_id, brief, spec, 
forgejo_issue_id, - execution_mode) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + execution_mode, category, acceptance_criteria, labels) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", (id, project_id, title, status, priority, assigned_role, parent_task_id, _json_encode(brief), _json_encode(spec), - forgejo_issue_id, execution_mode), + forgejo_issue_id, execution_mode, category, acceptance_criteria, + _json_encode(labels)), ) conn.commit() return get_task(conn, id) @@ -179,7 +255,7 @@ def update_task(conn: sqlite3.Connection, id: str, **fields) -> dict: """Update task fields. Auto-sets updated_at.""" if not fields: return get_task(conn, id) - json_cols = ("brief", "spec", "review", "test_result", "security_result") + json_cols = ("brief", "spec", "review", "test_result", "security_result", "labels") for key in json_cols: if key in fields: fields[key] = _json_encode(fields[key]) @@ -191,6 +267,15 @@ def update_task(conn: sqlite3.Connection, id: str, **fields) -> dict: return get_task(conn, id) +def mark_telegram_sent(conn: sqlite3.Connection, task_id: str) -> None: + """Mark that a Telegram escalation was sent for this task.""" + conn.execute( + "UPDATE tasks SET telegram_sent = 1 WHERE id = ?", + (task_id,), + ) + conn.commit() + + # --------------------------------------------------------------------------- # Decisions # --------------------------------------------------------------------------- @@ -220,6 +305,32 @@ def add_decision( return _row_to_dict(row) +def add_decision_if_new( + conn: sqlite3.Connection, + project_id: str, + type: str, + title: str, + description: str, + category: str | None = None, + tags: list | None = None, + task_id: str | None = None, +) -> dict | None: + """Add a decision only if no existing one matches (project_id, type, normalized title). + + Returns the new decision dict, or None if skipped as duplicate. + """ + existing = conn.execute( + """SELECT id FROM decisions + WHERE project_id = ? AND type = ? 
+ AND lower(trim(title)) = lower(trim(?))""", + (project_id, type, title), + ).fetchone() + if existing: + return None + return add_decision(conn, project_id, type, title, description, + category=category, tags=tags, task_id=task_id) + + def get_decisions( conn: sqlite3.Connection, project_id: str, @@ -284,17 +395,26 @@ def add_module( ) -> dict: """Register a project module.""" cur = conn.execute( - """INSERT INTO modules (project_id, name, type, path, description, + """INSERT OR IGNORE INTO modules (project_id, name, type, path, description, owner_role, dependencies) VALUES (?, ?, ?, ?, ?, ?, ?)""", (project_id, name, type, path, description, owner_role, _json_encode(dependencies)), ) + created = cur.rowcount > 0 conn.commit() - row = conn.execute( - "SELECT * FROM modules WHERE id = ?", (cur.lastrowid,) - ).fetchone() - return _row_to_dict(row) + if cur.lastrowid: + row = conn.execute( + "SELECT * FROM modules WHERE id = ?", (cur.lastrowid,) + ).fetchone() + else: + row = conn.execute( + "SELECT * FROM modules WHERE project_id = ? AND name = ?", + (project_id, name), + ).fetchone() + result = _row_to_dict(row) + result["_created"] = created + return result def get_modules(conn: sqlite3.Connection, project_id: str) -> list[dict]: @@ -442,6 +562,58 @@ def list_tickets( return _rows_to_list(conn.execute(query, params).fetchall()) +# --------------------------------------------------------------------------- +# Audit Log +# --------------------------------------------------------------------------- + +def log_audit_event( + conn: sqlite3.Connection, + event_type: str, + task_id: str | None = None, + step_id: str | None = None, + reason: str | None = None, + project_id: str | None = None, +) -> dict: + """Log a security-sensitive event to audit_log. + + event_type='dangerous_skip' is used when --dangerously-skip-permissions is invoked. 
+ """ + cur = conn.execute( + """INSERT INTO audit_log (event_type, task_id, step_id, reason, project_id) + VALUES (?, ?, ?, ?, ?)""", + (event_type, task_id, step_id, reason, project_id), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM audit_log WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def get_audit_log( + conn: sqlite3.Connection, + task_id: str | None = None, + project_id: str | None = None, + event_type: str | None = None, + limit: int = 100, +) -> list[dict]: + """Query audit log entries with optional filters.""" + query = "SELECT * FROM audit_log WHERE 1=1" + params: list = [] + if task_id: + query += " AND task_id = ?" + params.append(task_id) + if project_id: + query += " AND project_id = ?" + params.append(project_id) + if event_type: + query += " AND event_type = ?" + params.append(event_type) + query += " ORDER BY timestamp DESC LIMIT ?" + params.append(limit) + return _rows_to_list(conn.execute(query, params).fetchall()) + + # --------------------------------------------------------------------------- # Statistics / Dashboard # --------------------------------------------------------------------------- @@ -481,3 +653,291 @@ def get_cost_summary(conn: sqlite3.Connection, days: int = 7) -> list[dict]: ORDER BY total_cost_usd DESC """, (f"-{days} days",)).fetchall() return _rows_to_list(rows) + + +# --------------------------------------------------------------------------- +# Project Phases (KIN-059) +# --------------------------------------------------------------------------- + +def create_phase( + conn: sqlite3.Connection, + project_id: str, + role: str, + phase_order: int, +) -> dict: + """Create a research phase for a project.""" + cur = conn.execute( + """INSERT INTO project_phases (project_id, role, phase_order, status) + VALUES (?, ?, ?, 'pending')""", + (project_id, role, phase_order), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM project_phases WHERE id = ?", (cur.lastrowid,) + 
).fetchone() + return _row_to_dict(row) + + +def get_phase(conn: sqlite3.Connection, phase_id: int) -> dict | None: + """Get a project phase by id.""" + row = conn.execute( + "SELECT * FROM project_phases WHERE id = ?", (phase_id,) + ).fetchone() + return _row_to_dict(row) + + +def list_phases(conn: sqlite3.Connection, project_id: str) -> list[dict]: + """List all phases for a project ordered by phase_order.""" + rows = conn.execute( + "SELECT * FROM project_phases WHERE project_id = ? ORDER BY phase_order", + (project_id,), + ).fetchall() + return _rows_to_list(rows) + + +def update_phase(conn: sqlite3.Connection, phase_id: int, **fields) -> dict: + """Update phase fields. Auto-sets updated_at.""" + if not fields: + return get_phase(conn, phase_id) + fields["updated_at"] = datetime.now().isoformat() + sets = ", ".join(f"{k} = ?" for k in fields) + vals = list(fields.values()) + [phase_id] + conn.execute(f"UPDATE project_phases SET {sets} WHERE id = ?", vals) + conn.commit() + return get_phase(conn, phase_id) + + +# --------------------------------------------------------------------------- +# Project Environments (KIN-087) +# --------------------------------------------------------------------------- + +def _get_fernet(): + """Get Fernet instance using KIN_SECRET_KEY env var. + + Raises RuntimeError if KIN_SECRET_KEY is not set. + """ + key = os.environ.get("KIN_SECRET_KEY") + if not key: + raise RuntimeError( + "KIN_SECRET_KEY environment variable is not set. " + "Generate with: python -c \"from cryptography.fernet import Fernet; " + "print(Fernet.generate_key().decode())\"" + ) + from cryptography.fernet import Fernet + return Fernet(key.encode()) + + +def _encrypt_auth(value: str) -> str: + """Encrypt auth_value using Fernet (AES-128-CBC + HMAC-SHA256).""" + return _get_fernet().encrypt(value.encode()).decode() + + +def _decrypt_auth( + stored: str, + conn: sqlite3.Connection | None = None, + env_id: int | None = None, +) -> str: + """Decrypt auth_value. 
Handles migration from legacy base64 obfuscation. + + If stored value uses the old b64: prefix, decodes it and re-encrypts + in the DB (re-encrypt on read) if conn and env_id are provided. + """ + if not stored: + return stored + from cryptography.fernet import InvalidToken + try: + return _get_fernet().decrypt(stored.encode()).decode() + except (InvalidToken, Exception): + # Legacy b64: format — migrate on read + if stored.startswith("b64:"): + plaintext = base64.b64decode(stored[4:]).decode() + if conn is not None and env_id is not None: + new_encrypted = _encrypt_auth(plaintext) + conn.execute( + "UPDATE project_environments SET auth_value = ? WHERE id = ?", + (new_encrypted, env_id), + ) + conn.commit() + return plaintext + return stored + + +def create_environment( + conn: sqlite3.Connection, + project_id: str, + name: str, + host: str, + username: str, + port: int = 22, + auth_type: str = "password", + auth_value: str | None = None, + is_installed: bool = False, +) -> dict: + """Create a project environment. auth_value stored Fernet-encrypted; returned as None.""" + obfuscated = _encrypt_auth(auth_value) if auth_value else None + cur = conn.execute( + """INSERT INTO project_environments + (project_id, name, host, port, username, auth_type, auth_value, is_installed) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)""", + (project_id, name, host, port, username, auth_type, obfuscated, int(is_installed)), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM project_environments WHERE id = ?", (cur.lastrowid,) + ).fetchone() + result = _row_to_dict(row) + result["auth_value"] = None # never expose in API responses + return result + + +def get_environment(conn: sqlite3.Connection, env_id: int) -> dict | None: + """Get environment by id. 
auth_value is returned decrypted (for internal use).""" + row = conn.execute( + "SELECT * FROM project_environments WHERE id = ?", (env_id,) + ).fetchone() + result = _row_to_dict(row) + if result and result.get("auth_value"): + result["auth_value"] = _decrypt_auth(result["auth_value"], conn=conn, env_id=env_id) + return result + + +def list_environments(conn: sqlite3.Connection, project_id: str) -> list[dict]: + """List all environments for a project. auth_value is always None in response.""" + rows = conn.execute( + "SELECT * FROM project_environments WHERE project_id = ? ORDER BY created_at", + (project_id,), + ).fetchall() + result = _rows_to_list(rows) + for env in result: + env["auth_value"] = None + return result + + +def update_environment(conn: sqlite3.Connection, env_id: int, **fields) -> dict: + """Update environment fields. Auto-sets updated_at. Returns record with auth_value=None.""" + if not fields: + result = get_environment(conn, env_id) + if result: + result["auth_value"] = None + return result + if "auth_value" in fields and fields["auth_value"]: + fields["auth_value"] = _encrypt_auth(fields["auth_value"]) + elif "auth_value" in fields: + del fields["auth_value"] # empty/None = don't update auth_value + fields["updated_at"] = datetime.now().isoformat() + sets = ", ".join(f"{k} = ?" for k in fields) + vals = list(fields.values()) + [env_id] + conn.execute(f"UPDATE project_environments SET {sets} WHERE id = ?", vals) + conn.commit() + result = get_environment(conn, env_id) + if result: + result["auth_value"] = None + return result + + +def delete_environment(conn: sqlite3.Connection, env_id: int) -> bool: + """Delete environment by id. 
Returns True if deleted, False if not found.""" + cur = conn.execute( + "DELETE FROM project_environments WHERE id = ?", (env_id,) + ) + conn.commit() + return cur.rowcount > 0 + + +# --------------------------------------------------------------------------- +# Chat Messages (KIN-OBS-012) +# --------------------------------------------------------------------------- + +def add_chat_message( + conn: sqlite3.Connection, + project_id: str, + role: str, + content: str, + message_type: str = "text", + task_id: str | None = None, +) -> dict: + """Add a chat message and return it as dict. + + role: 'user' | 'assistant' | 'system' + message_type: 'text' | 'task_created' | 'error' + task_id: set for message_type='task_created' to link to the created task. + """ + cur = conn.execute( + """INSERT INTO chat_messages (project_id, role, content, message_type, task_id) + VALUES (?, ?, ?, ?, ?)""", + (project_id, role, content, message_type, task_id), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM chat_messages WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +# --------------------------------------------------------------------------- +# Task Attachments (KIN-090) +# --------------------------------------------------------------------------- + +def create_attachment( + conn: sqlite3.Connection, + task_id: str, + filename: str, + path: str, + mime_type: str, + size: int, +) -> dict: + """Create a task attachment record. 
path must be absolute.""" + cur = conn.execute( + """INSERT INTO task_attachments (task_id, filename, path, mime_type, size) + VALUES (?, ?, ?, ?, ?)""", + (task_id, filename, path, mime_type, size), + ) + conn.commit() + row = conn.execute( + "SELECT * FROM task_attachments WHERE id = ?", (cur.lastrowid,) + ).fetchone() + return _row_to_dict(row) + + +def list_attachments(conn: sqlite3.Connection, task_id: str) -> list[dict]: + """List all attachments for a task ordered by creation time.""" + rows = conn.execute( + "SELECT * FROM task_attachments WHERE task_id = ? ORDER BY created_at", + (task_id,), + ).fetchall() + return _rows_to_list(rows) + + +def get_attachment(conn: sqlite3.Connection, attachment_id: int) -> dict | None: + """Get a single attachment by id.""" + row = conn.execute( + "SELECT * FROM task_attachments WHERE id = ?", (attachment_id,) + ).fetchone() + return _row_to_dict(row) + + +def delete_attachment(conn: sqlite3.Connection, attachment_id: int) -> bool: + """Delete attachment record. Returns True if deleted, False if not found.""" + cur = conn.execute("DELETE FROM task_attachments WHERE id = ?", (attachment_id,)) + conn.commit() + return cur.rowcount > 0 + + +def get_chat_messages( + conn: sqlite3.Connection, + project_id: str, + limit: int = 50, + before_id: int | None = None, +) -> list[dict]: + """Get chat messages for a project in chronological order (oldest first). + + before_id: pagination cursor — return messages with id < before_id. + """ + query = "SELECT * FROM chat_messages WHERE project_id = ?" + params: list = [project_id] + if before_id is not None: + query += " AND id < ?" + params.append(before_id) + query += " ORDER BY created_at ASC, id ASC LIMIT ?" 
+ params.append(limit) + return _rows_to_list(conn.execute(query, params).fetchall()) diff --git a/core/obsidian_sync.py b/core/obsidian_sync.py new file mode 100644 index 0000000..d536147 --- /dev/null +++ b/core/obsidian_sync.py @@ -0,0 +1,183 @@ +""" +Kin — двусторонний sync с Obsidian vault. + +Export: decisions → .md-файлы с YAML frontmatter +Import: чекбоксы в .md-файлах → статус задач +""" + +import re +import sqlite3 +from pathlib import Path +from typing import Optional + +from core import models + + +def _slug(title: str) -> str: + """Генерирует slug из заголовка для имени файла.""" + s = title.lower() + s = re.sub(r"[^a-zа-я0-9\s-]", "", s) + s = re.sub(r"\s+", "-", s.strip()) + return s[:50] + + +def _decision_to_md(decision: dict) -> str: + """Форматирует decision как .md файл с YAML frontmatter.""" + tags = decision.get("tags") or [] + if isinstance(tags, str): + try: + import json + tags = json.loads(tags) + except Exception: + tags = [] + + tags_str = "[" + ", ".join(str(t) for t in tags) + "]" + created_at = (decision.get("created_at") or "")[:10] # только дата + + frontmatter = ( + "---\n" + f"kin_decision_id: {decision['id']}\n" + f"project: {decision['project_id']}\n" + f"type: {decision['type']}\n" + f"category: {decision.get('category') or ''}\n" + f"tags: {tags_str}\n" + f"created_at: {created_at}\n" + "---\n" + ) + + body = f"\n# {decision['title']}\n\n{decision['description']}\n" + return frontmatter + body + + +def _parse_frontmatter(text: str) -> dict: + """Парсит YAML frontmatter из .md файла (упрощённый парсер через re).""" + result = {} + match = re.match(r"^---\n(.*?)\n---", text, re.DOTALL) + if not match: + return result + for line in match.group(1).splitlines(): + if ":" in line: + key, _, val = line.partition(":") + result[key.strip()] = val.strip() + return result + + +def export_decisions_to_md( + project_id: str, + decisions: list[dict], + vault_path: Path, +) -> list[Path]: + """Экспортирует decisions в .md-файлы Obsidian. 
Возвращает список созданных файлов.""" + out_dir = vault_path / project_id / "decisions" + out_dir.mkdir(parents=True, exist_ok=True) + + created: list[Path] = [] + for d in decisions: + slug = _slug(d["title"]) + fname = f"{d['id']}-{slug}.md" + fpath = out_dir / fname + fpath.write_text(_decision_to_md(d), encoding="utf-8") + created.append(fpath) + + return created + + +def parse_task_checkboxes( + vault_path: Path, + project_id: str, +) -> list[dict]: + """Парсит *.md-файлы в vault/{project_id}/tasks/ и {project_id}/ на чекбоксы с task ID. + + Returns: [{"task_id": "KIN-013", "done": True, "title": "..."}] + """ + pattern = re.compile(r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-(?:[A-Z][A-Z0-9]*-)?\d+)\s+(.+)$") + results: list[dict] = [] + + search_dirs = [ + vault_path / project_id / "tasks", + vault_path / project_id, + ] + + for search_dir in search_dirs: + if not search_dir.is_dir(): + continue + for md_file in search_dir.glob("*.md"): + try: + text = md_file.read_text(encoding="utf-8") + except OSError: + continue + for line in text.splitlines(): + m = pattern.match(line.strip()) + if m: + check_char, task_id, title = m.group(1), m.group(2), m.group(3) + results.append({ + "task_id": task_id, + "done": check_char.lower() == "x", + "title": title.strip(), + }) + + return results + + +def sync_obsidian(conn: sqlite3.Connection, project_id: str) -> dict: + """Оркестратор: export decisions + import checkboxes. 
+ + Returns: + { + "exported_decisions": int, + "tasks_updated": int, + "errors": list[str], + "vault_path": str + } + """ + project = models.get_project(conn, project_id) + if not project: + raise ValueError(f"Project '{project_id}' not found") + + vault_path_str: Optional[str] = project.get("obsidian_vault_path") + if not vault_path_str: + raise ValueError(f"obsidian_vault_path not set for project '{project_id}'") + + vault_path = Path(vault_path_str) + errors: list[str] = [] + + # --- Создаём vault_path если не существует --- + try: + vault_path.mkdir(parents=True, exist_ok=True) + except Exception as e: + errors.append(f"Cannot create vault path {vault_path_str}: {e}") + return {"exported_decisions": 0, "tasks_updated": 0, "errors": errors, "vault_path": vault_path_str} + + # --- Export decisions --- + exported_count = 0 + try: + decisions = models.get_decisions(conn, project_id) + created_files = export_decisions_to_md(project_id, decisions, vault_path) + exported_count = len(created_files) + except Exception as e: + errors.append(f"Export error: {e}") + + # --- Import checkboxes --- + tasks_updated = 0 + try: + checkboxes = parse_task_checkboxes(vault_path, project_id) + for item in checkboxes: + if not item["done"]: + continue + task = models.get_task(conn, item["task_id"]) + if task is None: + continue + if task.get("project_id") != project_id: + continue + if task.get("status") != "done": + models.update_task(conn, item["task_id"], status="done") + tasks_updated += 1 + except Exception as e: + errors.append(f"Import error: {e}") + + return { + "exported_decisions": exported_count, + "tasks_updated": tasks_updated, + "errors": errors, + "vault_path": vault_path_str, + } diff --git a/core/phases.py b/core/phases.py new file mode 100644 index 0000000..1e08bac --- /dev/null +++ b/core/phases.py @@ -0,0 +1,210 @@ +""" +Kin — Research Phase Pipeline (KIN-059). 
+ +Sequential workflow: Director describes a new project, picks researcher roles, +each phase produces a task for review. After approve → next phase activates. +Architect always runs last (auto-added when any researcher is selected). +""" + +import sqlite3 + +from core import models + +# Canonical order of research roles (architect always last) +RESEARCH_ROLES = [ + "business_analyst", + "market_researcher", + "legal_researcher", + "tech_researcher", + "ux_designer", + "marketer", + "architect", +] + +# Human-readable labels +ROLE_LABELS = { + "business_analyst": "Business Analyst", + "market_researcher": "Market Researcher", + "legal_researcher": "Legal Researcher", + "tech_researcher": "Tech Researcher", + "ux_designer": "UX Designer", + "marketer": "Marketer", + "architect": "Architect", +} + + +def validate_roles(roles: list[str]) -> list[str]: + """Filter unknown roles, remove duplicates, strip 'architect' (auto-added later).""" + seen: set[str] = set() + result = [] + for r in roles: + r = r.strip().lower() + if r == "architect": + continue + if r in RESEARCH_ROLES and r not in seen: + seen.add(r) + result.append(r) + return result + + +def build_phase_order(selected_roles: list[str]) -> list[str]: + """Return roles in canonical RESEARCH_ROLES order, append architect if any selected.""" + ordered = [r for r in RESEARCH_ROLES if r in selected_roles and r != "architect"] + if ordered: + ordered.append("architect") + return ordered + + +def create_project_with_phases( + conn: sqlite3.Connection, + id: str, + name: str, + path: str | None = None, + *, + description: str, + selected_roles: list[str], + tech_stack: list | None = None, + priority: int = 5, + language: str = "ru", +) -> dict: + """Create project + sequential research phases. + + Returns {project, phases}. 
+ """ + clean_roles = validate_roles(selected_roles) + ordered_roles = build_phase_order(clean_roles) + if not ordered_roles: + raise ValueError("At least one research role must be selected") + + project = models.create_project( + conn, id, name, path, + tech_stack=tech_stack, priority=priority, language=language, + description=description, + ) + + phases = [] + for idx, role in enumerate(ordered_roles): + phase = models.create_phase(conn, id, role, idx) + phases.append(phase) + + # Activate the first phase immediately + if phases: + phases[0] = activate_phase(conn, phases[0]["id"]) + + return {"project": project, "phases": phases} + + +def activate_phase(conn: sqlite3.Connection, phase_id: int) -> dict: + """Create a task for the phase and set it to active. + + Task brief includes project description + phase context. + """ + phase = models.get_phase(conn, phase_id) + if not phase: + raise ValueError(f"Phase {phase_id} not found") + + project = models.get_project(conn, phase["project_id"]) + if not project: + raise ValueError(f"Project {phase['project_id']} not found") + + task_id = models.next_task_id(conn, phase["project_id"], category=None) + brief = { + "text": project.get("description") or project["name"], + "phase": phase["role"], + "phase_order": phase["phase_order"], + "workflow": "research", + } + task = models.create_task( + conn, task_id, phase["project_id"], + title=f"[Research] {ROLE_LABELS.get(phase['role'], phase['role'])}", + assigned_role=phase["role"], + brief=brief, + status="pending", + category=None, + ) + updated = models.update_phase(conn, phase_id, task_id=task["id"], status="active") + return updated + + +def approve_phase(conn: sqlite3.Connection, phase_id: int) -> dict: + """Approve a phase, activate the next one (or finish workflow). + + Returns {phase, next_phase|None}. 
+ """ + phase = models.get_phase(conn, phase_id) + if not phase: + raise ValueError(f"Phase {phase_id} not found") + if phase["status"] != "active": + raise ValueError(f"Phase {phase_id} is not active (current: {phase['status']})") + + updated = models.update_phase(conn, phase_id, status="approved") + + # Find next pending phase + all_phases = models.list_phases(conn, phase["project_id"]) + next_phase = None + for p in all_phases: + if p["phase_order"] > phase["phase_order"] and p["status"] == "pending": + next_phase = p + break + + if next_phase: + activated = activate_phase(conn, next_phase["id"]) + return {"phase": updated, "next_phase": activated} + + return {"phase": updated, "next_phase": None} + + +def reject_phase(conn: sqlite3.Connection, phase_id: int, reason: str) -> dict: + """Reject a phase (director rejects the research output entirely).""" + phase = models.get_phase(conn, phase_id) + if not phase: + raise ValueError(f"Phase {phase_id} not found") + if phase["status"] != "active": + raise ValueError(f"Phase {phase_id} is not active (current: {phase['status']})") + + return models.update_phase(conn, phase_id, status="rejected") + + +def revise_phase(conn: sqlite3.Connection, phase_id: int, comment: str) -> dict: + """Request revision: create a new task for the same role with the comment. + + Returns {phase, new_task}. 
+ """ + phase = models.get_phase(conn, phase_id) + if not phase: + raise ValueError(f"Phase {phase_id} not found") + if phase["status"] not in ("active", "revising"): + raise ValueError( + f"Phase {phase_id} cannot be revised (current: {phase['status']})" + ) + + project = models.get_project(conn, phase["project_id"]) + if not project: + raise ValueError(f"Project {phase['project_id']} not found") + + new_task_id = models.next_task_id(conn, phase["project_id"], category=None) + brief = { + "text": project.get("description") or project["name"], + "phase": phase["role"], + "phase_order": phase["phase_order"], + "workflow": "research", + "revise_comment": comment, + "revise_count": (phase.get("revise_count") or 0) + 1, + } + new_task = models.create_task( + conn, new_task_id, phase["project_id"], + title=f"[Research Revise] {ROLE_LABELS.get(phase['role'], phase['role'])}", + assigned_role=phase["role"], + brief=brief, + status="pending", + category=None, + ) + new_revise_count = (phase.get("revise_count") or 0) + 1 + updated = models.update_phase( + conn, phase_id, + status="revising", + task_id=new_task["id"], + revise_count=new_revise_count, + revise_comment=comment, + ) + return {"phase": updated, "new_task": new_task} diff --git a/core/telegram.py b/core/telegram.py new file mode 100644 index 0000000..3857e42 --- /dev/null +++ b/core/telegram.py @@ -0,0 +1,102 @@ +""" +Kin — Telegram escalation notifications. + +Sends a message when a PM agent detects a blocked agent. +Bot token is read from /Volumes/secrets/env/projects.env [kin] section. +Chat ID is read from KIN_TG_CHAT_ID env var. 
+""" + +import configparser +import json +import logging +import os +import urllib.error +import urllib.parse +import urllib.request +from pathlib import Path + +_logger = logging.getLogger("kin.telegram") + +_SECRETS_PATH = Path("/Volumes/secrets/env/projects.env") +_TELEGRAM_API = "https://api.telegram.org/bot{token}/sendMessage" + + +def _load_kin_config() -> dict: + """Load [kin] section from projects.env. Returns dict with available keys.""" + if not _SECRETS_PATH.exists(): + _logger.warning("secrets not mounted: %s", _SECRETS_PATH) + return {} + parser = configparser.ConfigParser() + parser.read(str(_SECRETS_PATH)) + if "kin" not in parser: + _logger.warning("No [kin] section in projects.env") + return {} + return dict(parser["kin"]) + + +def send_telegram_escalation( + task_id: str, + project_name: str, + agent_role: str, + reason: str, + pipeline_step: str | None, +) -> bool: + """Send a Telegram escalation message for a blocked agent. + + Returns True if message was sent successfully, False otherwise. + Never raises — escalation errors must never block the pipeline. 
+ """ + config = _load_kin_config() + bot_token = config.get("tg_bot") or os.environ.get("KIN_TG_BOT_TOKEN") + if not bot_token: + _logger.warning("Telegram bot token not configured; skipping escalation for %s", task_id) + return False + + chat_id = os.environ.get("KIN_TG_CHAT_ID") + if not chat_id: + _logger.warning("KIN_TG_CHAT_ID not set; skipping Telegram escalation for %s", task_id) + return False + + step_info = f" (шаг {pipeline_step})" if pipeline_step else "" + text = ( + f"🚨 *Эскалация* — агент заблокирован\n\n" + f"*Проект:* {_escape_md(project_name)}\n" + f"*Задача:* `{task_id}`\n" + f"*Агент:* `{agent_role}{step_info}`\n" + f"*Причина:*\n{_escape_md(reason or '—')}" + ) + + payload = json.dumps({ + "chat_id": chat_id, + "text": text, + "parse_mode": "Markdown", + }).encode("utf-8") + + url = _TELEGRAM_API.format(token=bot_token) + req = urllib.request.Request( + url, + data=payload, + headers={"Content-Type": "application/json"}, + method="POST", + ) + try: + with urllib.request.urlopen(req, timeout=10) as resp: + if resp.status == 200: + _logger.info("Telegram escalation sent for task %s", task_id) + return True + _logger.warning("Telegram API returned status %d for task %s", resp.status, task_id) + return False + except urllib.error.URLError as exc: + _logger.warning("Telegram send failed for task %s: %s", task_id, exc) + return False + except Exception as exc: + _logger.warning("Unexpected Telegram error for task %s: %s", task_id, exc) + return False + + +def _escape_md(text: str) -> str: + """Escape Markdown special characters for Telegram MarkdownV1.""" + # MarkdownV1 is lenient — only escape backtick/asterisk/underscore in free text + for ch in ("*", "_", "`"): + text = text.replace(ch, f"\\{ch}") + return text diff --git a/core/worktree.py b/core/worktree.py new file mode 100644 index 0000000..1062766 --- /dev/null +++ b/core/worktree.py @@ -0,0 +1,149 @@ +""" +Kin — Git worktree management for isolated agent execution. 
+ +Each eligible agent step gets its own worktree in {project_path}/.kin_worktrees/ +to prevent file-write conflicts between parallel or sequential agents. + +All functions are defensive: never raise, always log warnings on error. +""" + +import logging +import shutil +import subprocess +from pathlib import Path + +_logger = logging.getLogger("kin.worktree") + + +def _git(project_path: str) -> str: + """Resolve git executable, preferring extended PATH.""" + try: + from agents.runner import _build_claude_env + env = _build_claude_env() + found = shutil.which("git", path=env["PATH"]) + return found or "git" + except Exception: + return shutil.which("git") or "git" + + +def create_worktree(project_path: str, task_id: str, step_name: str = "step") -> str | None: + """Create a git worktree for isolated agent execution. + + Creates: {project_path}/.kin_worktrees/{task_id}-{step_name} + Branch name equals the worktree directory name. + + Returns the absolute worktree path, or None on any failure. + """ + git = _git(project_path) + safe_step = step_name.replace("/", "_").replace(" ", "_") + branch_name = f"{task_id}-{safe_step}" + worktrees_dir = Path(project_path) / ".kin_worktrees" + worktree_path = worktrees_dir / branch_name + + try: + worktrees_dir.mkdir(exist_ok=True) + r = subprocess.run( + [git, "worktree", "add", "-b", branch_name, str(worktree_path), "HEAD"], + cwd=project_path, + capture_output=True, + text=True, + timeout=30, + ) + if r.returncode != 0: + _logger.warning("git worktree add failed for %s: %s", branch_name, r.stderr.strip()) + return None + _logger.info("Created worktree: %s", worktree_path) + return str(worktree_path) + except Exception as exc: + _logger.warning("create_worktree error for %s: %s", branch_name, exc) + return None + + +def merge_worktree(worktree_path: str, project_path: str) -> dict: + """Merge the worktree branch back into current HEAD of project_path. + + Branch name is derived from the worktree directory name. 
+ On conflict: aborts merge and returns success=False with conflict list. + + Returns {success: bool, conflicts: list[str], merged_files: list[str]} + """ + git = _git(project_path) + branch_name = Path(worktree_path).name + + try: + merge_result = subprocess.run( + [git, "-C", project_path, "merge", "--no-ff", branch_name], + capture_output=True, + text=True, + timeout=60, + ) + if merge_result.returncode == 0: + diff_result = subprocess.run( + [git, "-C", project_path, "diff", "HEAD~1", "HEAD", "--name-only"], + capture_output=True, + text=True, + timeout=10, + ) + merged_files = [ + f.strip() for f in diff_result.stdout.splitlines() if f.strip() + ] + _logger.info("Merged worktree %s: %d files", branch_name, len(merged_files)) + return {"success": True, "conflicts": [], "merged_files": merged_files} + + # Merge failed — collect conflicts and abort + conflict_result = subprocess.run( + [git, "-C", project_path, "diff", "--name-only", "--diff-filter=U"], + capture_output=True, + text=True, + timeout=10, + ) + conflicts = [f.strip() for f in conflict_result.stdout.splitlines() if f.strip()] + + subprocess.run( + [git, "-C", project_path, "merge", "--abort"], + capture_output=True, + timeout=10, + ) + _logger.warning("Merge conflict in worktree %s: %s", branch_name, conflicts) + return {"success": False, "conflicts": conflicts, "merged_files": []} + + except Exception as exc: + _logger.warning("merge_worktree error for %s: %s", branch_name, exc) + return {"success": False, "conflicts": [], "merged_files": [], "error": str(exc)} + + +def cleanup_worktree(worktree_path: str, project_path: str) -> None: + """Remove the git worktree and its branch. 
Never raises.""" + git = _git(project_path) + branch_name = Path(worktree_path).name + + try: + subprocess.run( + [git, "-C", project_path, "worktree", "remove", "--force", worktree_path], + capture_output=True, + timeout=30, + ) + subprocess.run( + [git, "-C", project_path, "branch", "-D", branch_name], + capture_output=True, + timeout=10, + ) + _logger.info("Cleaned up worktree: %s", worktree_path) + except Exception as exc: + _logger.warning("cleanup_worktree error for %s: %s", branch_name, exc) + + +def ensure_gitignore(project_path: str) -> None: + """Ensure .kin_worktrees/ is in project's .gitignore. Never raises.""" + entry = ".kin_worktrees/" + gitignore = Path(project_path) / ".gitignore" + try: + if gitignore.exists(): + content = gitignore.read_text() + if entry not in content: + with gitignore.open("a") as f: + f.write(f"\n{entry}\n") + else: + gitignore.write_text(f"{entry}\n") + except Exception as exc: + _logger.warning("ensure_gitignore error: %s", exc) diff --git a/pyproject.toml b/pyproject.toml index ad9e1fa..84abed8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "kin" version = "0.1.0" description = "Multi-agent project orchestrator" requires-python = ">=3.11" -dependencies = ["click>=8.0", "fastapi>=0.110", "uvicorn>=0.29"] +dependencies = ["click>=8.0", "fastapi>=0.110", "uvicorn>=0.29", "cryptography>=41.0", "python-multipart>=0.0.9", "PyYAML>=6.0"] [project.scripts] kin = "cli.main:cli" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..91d98ed --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +click>=8.0 +fastapi>=0.110 +uvicorn>=0.29 +cryptography>=41.0 +python-multipart>=0.0.9 +PyYAML>=6.0 diff --git a/scripts/rebuild-frontend.sh b/scripts/rebuild-frontend.sh index 19b9ea6..b21ae1c 100755 --- a/scripts/rebuild-frontend.sh +++ b/scripts/rebuild-frontend.sh @@ -19,20 +19,13 @@ npm run build echo "[rebuild-frontend] Build complete." # Restart API server if it's currently running. 
+# API is managed by launchctl with KeepAlive=true — just kill it, launchctl restarts it. # pgrep returns 1 if no match; || true prevents set -e from exiting. API_PID=$(pgrep -f "uvicorn web.api" 2>/dev/null || true) if [ -n "$API_PID" ]; then - echo "[rebuild-frontend] Stopping API server (PID: $API_PID) ..." + echo "[rebuild-frontend] Restarting API server (PID: $API_PID) — launchctl will auto-restart ..." kill "$API_PID" 2>/dev/null || true - # Wait for port 8420 to free up (up to 5 s) - for i in $(seq 1 5); do - pgrep -f "uvicorn web.api" > /dev/null 2>&1 || break - sleep 1 - done - echo "[rebuild-frontend] Starting API server ..." - cd "$PROJECT_ROOT" - nohup python -m uvicorn web.api:app --port 8420 >> /tmp/kin-api.log 2>&1 & - echo "[rebuild-frontend] API server started (PID: $!)." + echo "[rebuild-frontend] API server restarted (launchctl KeepAlive=true)." else echo "[rebuild-frontend] API server not running; skipping restart." fi diff --git a/tasks/KIN-013-spec.md b/tasks/KIN-013-spec.md new file mode 100644 index 0000000..013eb34 --- /dev/null +++ b/tasks/KIN-013-spec.md @@ -0,0 +1,266 @@ +# KIN-013 — Settings + Obsidian Sync: Техническая спецификация + +## Контекст + +Фича добавляет: +1. Страницу Settings в GUI для управления конфигурацией проектов +2. Двусторонний Obsidian sync: decisions → .md-файлы, чекбоксы Obsidian → статус задач + +Sync вызывается явно по кнопке (не демон), через API-эндпоинт. + +--- + +## 1. Схема данных + +### Изменение таблицы `projects` + +Добавить колонку: +```sql +ALTER TABLE projects ADD COLUMN obsidian_vault_path TEXT; +``` + +**Миграция**: в `core/db.py` → `_migrate()`, по паттерну существующих миграций: +```python +if "obsidian_vault_path" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN obsidian_vault_path TEXT") + conn.commit() +``` + +**Семантика**: путь к корневой папке Obsidian vault для данного проекта. 
+Пример: `/Users/grosfrumos/Library/Mobile Documents/iCloud~md~obsidian/Documents/MyVault` + +--- + +## 2. Формат .md-файлов для decisions + +### Расположение +``` +{vault_path}/{project_id}/decisions/{id}-{slug}.md +``` +Пример: `.../kin/decisions/42-proxy-jump-ssh-gotcha.md` + +### Формат файла (YAML frontmatter + Markdown body) +```markdown +--- +kin_decision_id: 42 +project: kin +type: gotcha +category: testing +tags: [testing, mock, subprocess] +created_at: 2026-03-10 +--- + +# Proxy через SSH не работает без ssh-agent + +Описание: полный текст description из БД. +``` + +**Обоснование frontmatter**: +- Позволяет идентифицировать файл при импорте (поле `kin_decision_id`) +- Позволяет Obsidian показывать метаданные в Properties panel +- Поддерживает round-trip sync без парсинга имени файла + +### Slug из заголовка +```python +import re +def _slug(title: str) -> str: + s = title.lower() + s = re.sub(r"[^a-zа-я0-9\s-]", "", s) + s = re.sub(r"\s+", "-", s.strip()) + return s[:50] +``` + +--- + +## 3. Механизм двустороннего sync + +### 3.1 Decisions → Obsidian (export) + +- Создать/перезаписать `.md`-файл для каждого decision проекта +- Директория создаётся автоматически (`mkdir -p`) +- Если файл для `kin_decision_id` уже существует — перезаписать (идемпотентно) +- Решения, удалённые из БД → файлы НЕ удаляются (безопасно) + +### 3.2 Obsidian чекбоксы → Tasks (import) + +**Источник**: файлы `*.md` в `{vault_path}/{project_id}/tasks/` +Дополнительно: файлы `{vault_path}/{project_id}/*.md` + +**Формат строки задачи**: +``` +- [x] KIN-013 Title of the task +- [ ] KIN-014 Another task +``` + +**Алгоритм**: +1. Найти строки по паттерну: `^[-*]\s+\[([xX ])\]\s+([A-Z]+-\d+)\s+(.+)$` +2. Извлечь: `done` (bool), `task_id` (str), `title` (str) +3. Найти задачу в БД по `task_id` +4. Если `done=True` и `task.status != 'done'` → `update_task(conn, task_id, status='done')` +5. Если `done=False` → не трогать (не откатываем) +6. 
Если задача не найдена → пропустить (не создавать) + +**Обоснование**: строгий маппинг только по task_id исключает случайное создание мусора. + +### 3.3 Функция `sync_obsidian` + +```python +def sync_obsidian(conn, project_id: str) -> dict: + """ + Returns: + { + "exported_decisions": int, + "tasks_updated": int, + "errors": list[str], + "vault_path": str + } + """ +``` + +--- + +## 4. Модуль `core/obsidian_sync.py` + +### Публичный API модуля + +```python +def export_decisions_to_md( + project_id: str, + decisions: list[dict], + vault_path: Path, +) -> list[Path]: + """Экспортирует decisions в .md файлы Obsidian. Возвращает список созданных файлов.""" + +def parse_task_checkboxes( + vault_path: Path, + project_id: str, +) -> list[dict]: + """Парсит *.md файлы в vault/{project_id}/tasks/ на чекбоксы с task ID. + Returns: [{"task_id": "KIN-013", "done": True, "title": "..."}] + """ + +def sync_obsidian(conn, project_id: str) -> dict: + """Оркестратор: export + import. Возвращает статистику.""" +``` + +### Вспомогательные (приватные) + +```python +def _slug(title: str) -> str # slug для имени файла +def _decision_to_md(decision: dict) -> str # форматирует .md с frontmatter +def _parse_frontmatter(text: str) -> dict # для будущего round-trip +``` + +### Зависимости +- Только стандартная библиотека Python: `pathlib`, `re`, `yaml` (через `import yaml`) или ручной YAML-парсер +- Важно: PyYAML может не быть установлен → использовать простой ручной вывод YAML-фронтматтера, парсинг через `re` +- Импортирует из `core.models`: `get_project`, `get_decisions`, `get_task`, `update_task` +- Импортирует из `core.db`: `get_connection` — НЕ нужен, conn передаётся снаружи + +--- + +## 5. 
API-эндпоинты + +### 5.1 PATCH /api/projects/{project_id} — расширить + +Добавить в `ProjectPatch`: +```python +class ProjectPatch(BaseModel): + execution_mode: str | None = None + autocommit_enabled: bool | None = None + obsidian_vault_path: str | None = None # новое поле +``` + +Обновить обработчик: если `obsidian_vault_path` provided → `update_project(conn, id, obsidian_vault_path=...)` +Убрать проверку "Nothing to update" → включить `obsidian_vault_path` в условие. + +### 5.2 POST /api/projects/{project_id}/sync/obsidian — новый + +```python +@app.post("/api/projects/{project_id}/sync/obsidian") +def sync_obsidian_endpoint(project_id: str): + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, ...) + if not p.get("obsidian_vault_path"): + conn.close() + raise HTTPException(400, "obsidian_vault_path not set for this project") + from core.obsidian_sync import sync_obsidian + result = sync_obsidian(conn, project_id) + conn.close() + return result +``` + +--- + +## 6. Frontend: Settings страница + +### Маршрут +- Path: `/settings` +- Component: `web/frontend/src/views/SettingsView.vue` +- Регистрация в `main.ts` + +### Навигация в `App.vue` +Добавить ссылку `Settings` в хедер рядом с `Kin`. + +### SettingsView.vue — структура + +``` +Settings +├── Список проектов (v-for) +│ ├── Название + id +│ ├── Input: obsidian_vault_path (text input) +│ ├── [Save] кнопка → PATCH /api/projects/{id} +│ └── [Sync Obsidian] кнопка → POST /api/projects/{id}/sync/obsidian +│ └── Показывает результат: "Exported: 5 decisions, Updated: 2 tasks" +``` + +### api.ts — добавить методы + +```typescript +// Обновить настройки проекта +patchProject(id: string, data: { obsidian_vault_path?: string, execution_mode?: string, autocommit_enabled?: boolean }) + +// Запустить Obsidian sync +syncObsidian(projectId: string): Promise<{ exported_decisions: number, tasks_updated: number, errors: string[] }> +``` + +--- + +## 7. 
Тесты + +### `tests/test_obsidian_sync.py` + +Обязательные кейсы: +1. `test_export_decisions_creates_md_files` — export создаёт файлы с правильным frontmatter +2. `test_export_idempotent` — повторный export перезаписывает, не дублирует +3. `test_parse_task_checkboxes_done` — `- [x] KIN-001 Title` → `{"task_id": "KIN-001", "done": True}` +4. `test_parse_task_checkboxes_pending` — `- [ ] KIN-002 Title` → `done: False` +5. `test_parse_task_checkboxes_no_id` — строки без task ID пропускаются +6. `test_sync_updates_task_status` — sync обновляет статус задачи если `done=True` +7. `test_sync_no_vault_path` — sync без vault_path выбрасывает ValueError + +--- + +## 8. Риски и ограничения + +1. **PyYAML не в зависимостях** → использовать ручную генерацию YAML-строки для frontmatter, парсинг `re` +2. **Vault path может быть недоступен** → sync возвращает error в `errors[]`, не падает +3. **Конфликт при rename decision** → файл со старым slug остаётся, создаётся новый. Приемлемо для v1 +4. **Большой vault** → scan только в `{vault_path}/{project_id}/tasks/`, не весь vault +5. **Одновременный sync** → нет блокировки (SQLite WAL + file system). В v1 достаточно + +--- + +## 9. Порядок реализации (для dev-агента) + +1. `core/db.py` — добавить `obsidian_vault_path` в `_migrate()` +2. `core/obsidian_sync.py` — реализовать `export_decisions_to_md`, `parse_task_checkboxes`, `sync_obsidian` +3. `tests/test_obsidian_sync.py` — написать тесты (7 кейсов выше) +4. `web/api.py` — расширить `ProjectPatch`, добавить `/sync/obsidian` эндпоинт +5. `web/frontend/src/api.ts` — добавить `patchProject` обновление и `syncObsidian` +6. `web/frontend/src/views/SettingsView.vue` — создать компонент +7. `web/frontend/src/main.ts` — зарегистрировать `/settings` маршрут +8. 
`web/frontend/src/App.vue` — добавить ссылку Settings в nav diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..b4a1af6 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,27 @@ +"""Shared pytest fixtures for Kin test suite.""" + +import pytest +from unittest.mock import patch + + +@pytest.fixture(autouse=True) +def _set_kin_secret_key(monkeypatch): + """Set KIN_SECRET_KEY for all tests (required by _encrypt_auth/_decrypt_auth).""" + from cryptography.fernet import Fernet + monkeypatch.setenv("KIN_SECRET_KEY", Fernet.generate_key().decode()) + + +@pytest.fixture(autouse=True) +def _mock_check_claude_auth(): + """Авто-мок agents.runner.check_claude_auth для всех тестов. + + run_pipeline() вызывает check_claude_auth() перед запуском агентов. + Без мока тесты, использующие side_effect-очереди для subprocess.run, + ломаются: первый вызов (auth-check) потребляет элемент очереди. + + Тесты TestCheckClaudeAuth (test_runner.py) НЕ затрагиваются: + они вызывают check_claude_auth через напрямую импортированную ссылку + (bound at module load time), а не через agents.runner.check_claude_auth. 
+ """ + with patch("agents.runner.check_claude_auth"): + yield diff --git a/tests/test_api.py b/tests/test_api.py index 3109486..626b9a1 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -2,11 +2,13 @@ import pytest from pathlib import Path +from unittest.mock import patch, MagicMock from fastapi.testclient import TestClient # Patch DB_PATH before importing app import web.api as api_module + @pytest.fixture def client(tmp_path): db_path = tmp_path / "test.db" @@ -142,6 +144,45 @@ def test_reject_not_found(client): assert r.status_code == 404 +def test_revise_task(client): + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="review") + conn.close() + + r = client.post("/api/tasks/P1-001/revise", json={ + "comment": "Доисследуй edge case с пустым массивом" + }) + assert r.status_code == 200 + assert r.json()["status"] == "in_progress" + + # Verify task is in_progress with revise_comment stored + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT status, revise_comment FROM tasks WHERE id = 'P1-001'").fetchone() + conn.close() + assert row["status"] == "in_progress" + assert row["revise_comment"] == "Доисследуй edge case с пустым массивом" + + +def test_revise_not_found(client): + r = client.post("/api/tasks/NOPE/revise", json={"comment": "fix it"}) + assert r.status_code == 404 + + +def test_revise_task_response_includes_comment(client): + """Ответ /revise содержит поле comment с переданным текстом.""" + r = client.post("/api/tasks/P1-001/revise", json={"comment": "Уточни требования"}) + assert r.status_code == 200 + assert r.json()["comment"] == "Уточни требования" + + +def test_revise_task_missing_comment_returns_422(client): + """Запрос /revise без поля comment → 422 Unprocessable Entity (Pydantic validation).""" + r = client.post("/api/tasks/P1-001/revise", json={}) + assert r.status_code == 422 + + def test_task_pipeline_not_found(client): r = 
client.get("/api/tasks/NOPE/pipeline") assert r.status_code == 404 @@ -185,6 +226,30 @@ def test_run_not_found(client): assert r.status_code == 404 +def test_run_returns_503_when_claude_not_authenticated(client): + """KIN-083: /run возвращает 503 с claude_auth_required если claude не залогинен.""" + from agents.runner import ClaudeAuthError + with patch("agents.runner.check_claude_auth", side_effect=ClaudeAuthError("Claude CLI requires login. Run: claude login")): + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 503 + body = r.json() + assert body["detail"]["error"] == "claude_auth_required" + assert body["detail"]["instructions"] == "Run: claude login" + assert "login" in body["detail"]["message"].lower() + + +def test_start_phase_returns_503_when_claude_not_authenticated(client): + """KIN-083: /phases/start возвращает 503 с claude_auth_required если claude не залогинен.""" + from agents.runner import ClaudeAuthError + with patch("agents.runner.check_claude_auth", side_effect=ClaudeAuthError("Claude CLI requires login. 
Run: claude login")): + r = client.post("/api/projects/p1/phases/start") + assert r.status_code == 503 + body = r.json() + assert body["detail"]["error"] == "claude_auth_required" + assert body["detail"]["instructions"] == "Run: claude login" + assert "login" in body["detail"]["message"].lower() + + def test_run_kin_038_without_allow_write(client): """Регрессионный тест KIN-038: allow_write удалён из схемы, эндпоинт принимает запросы с пустым телом без этого параметра.""" @@ -342,6 +407,26 @@ def test_patch_task_empty_body_returns_400(client): assert r.status_code == 400 +def test_patch_task_execution_mode_auto_complete_accepted(client): + """KIN-063: execution_mode='auto_complete' принимается (200).""" + r = client.patch("/api/tasks/P1-001", json={"execution_mode": "auto_complete"}) + assert r.status_code == 200 + assert r.json()["execution_mode"] == "auto_complete" + + +def test_patch_task_execution_mode_auto_rejected(client): + """KIN-063: старое значение 'auto' должно отклоняться (400) — Decision #29.""" + r = client.patch("/api/tasks/P1-001", json={"execution_mode": "auto"}) + assert r.status_code == 400 + + +def test_patch_task_execution_mode_review_accepted(client): + """KIN-074: execution_mode='review' принимается (200) — регрессия после фикса frontend.""" + r = client.patch("/api/tasks/P1-001", json={"execution_mode": "review"}) + assert r.status_code == 200 + assert r.json()["execution_mode"] == "review" + + # --------------------------------------------------------------------------- # KIN-022 — blocked_reason: регрессионные тесты # --------------------------------------------------------------------------- @@ -589,3 +674,1451 @@ def test_run_kin_040_allow_write_true_ignored(client): Эндпоинт не имеет body-параметра, поэтому FastAPI не валидирует тело.""" r = client.post("/api/tasks/P1-001/run", json={"allow_write": True}) assert r.status_code == 202 + + +# --------------------------------------------------------------------------- +# KIN-058 — 
регрессионный тест: stderr=DEVNULL у Popen в web API +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# KIN-020 — manual_escalation задачи: PATCH status='done' резолвит задачу +# --------------------------------------------------------------------------- + +def test_patch_manual_escalation_task_to_done(client): + """PATCH status='done' на manual_escalation задаче — статус обновляется — KIN-020.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.create_task(conn, "P1-002", "p1", "Fix .dockerignore manually", + brief={"task_type": "manual_escalation", + "source": "followup:P1-001", + "description": "Ручное применение .dockerignore"}) + conn.close() + + r = client.patch("/api/tasks/P1-002", json={"status": "done"}) + assert r.status_code == 200 + assert r.json()["status"] == "done" + + +def test_manual_escalation_task_brief_preserved_after_patch(client): + """PATCH не затирает brief.task_type — поле manual_escalation сохраняется — KIN-020.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.create_task(conn, "P1-002", "p1", "Fix manually", + brief={"task_type": "manual_escalation", + "source": "followup:P1-001"}) + conn.close() + + client.patch("/api/tasks/P1-002", json={"status": "done"}) + r = client.get("/api/tasks/P1-002") + assert r.status_code == 200 + assert r.json()["brief"]["task_type"] == "manual_escalation" + + +def test_run_sets_stderr_devnull(client): + """Регрессионный тест KIN-058: stderr=DEVNULL всегда устанавливается в Popen, + чтобы stderr дочернего процесса не загрязнял логи uvicorn.""" + import subprocess as _subprocess + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 77 + mock_popen.return_value = mock_proc + + r = 
client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + call_kwargs = mock_popen.call_args[1] + assert call_kwargs.get("stderr") == _subprocess.DEVNULL, ( + "Регрессия KIN-058: stderr у Popen должен быть DEVNULL, " + "иначе вывод агента попадает в логи uvicorn" + ) + + +# --------------------------------------------------------------------------- +# KIN-065 — PATCH /api/projects/{id} — autocommit_enabled toggle +# --------------------------------------------------------------------------- + +def test_patch_project_autocommit_enabled_true(client): + """PATCH с autocommit_enabled=true → 200, поле установлено в 1.""" + r = client.patch("/api/projects/p1", json={"autocommit_enabled": True}) + assert r.status_code == 200 + assert r.json()["autocommit_enabled"] == 1 + + +def test_patch_project_autocommit_enabled_false(client): + """После включения PATCH с autocommit_enabled=false → 200, поле установлено в 0.""" + client.patch("/api/projects/p1", json={"autocommit_enabled": True}) + r = client.patch("/api/projects/p1", json={"autocommit_enabled": False}) + assert r.status_code == 200 + assert r.json()["autocommit_enabled"] == 0 + + +def test_patch_project_autocommit_persisted_via_sql(client): + """После PATCH autocommit_enabled=True прямой SQL подтверждает значение 1.""" + client.patch("/api/projects/p1", json={"autocommit_enabled": True}) + + from core.db import init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT autocommit_enabled FROM projects WHERE id = 'p1'").fetchone() + conn.close() + assert row is not None + assert row[0] == 1 + + +def test_patch_project_autocommit_false_persisted_via_sql(client): + """После PATCH autocommit_enabled=False прямой SQL подтверждает значение 0.""" + client.patch("/api/projects/p1", json={"autocommit_enabled": True}) + client.patch("/api/projects/p1", json={"autocommit_enabled": False}) + + from core.db import init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT 
autocommit_enabled FROM projects WHERE id = 'p1'").fetchone() + conn.close() + assert row is not None + assert row[0] == 0 + + +def test_patch_project_autocommit_null_before_first_update(client): + """Новый проект имеет autocommit_enabled=NULL/0 (falsy) до первого обновления.""" + client.post("/api/projects", json={"id": "p_new", "name": "New", "path": "/new"}) + + from core.db import init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT autocommit_enabled FROM projects WHERE id = 'p_new'").fetchone() + conn.close() + assert row is not None + assert not row[0] # DEFAULT 0 или NULL — в любом случае falsy + + +def test_patch_project_empty_body_returns_400(client): + """PATCH проекта без полей → 400.""" + r = client.patch("/api/projects/p1", json={}) + assert r.status_code == 400 + + +def test_patch_project_not_found(client): + """PATCH несуществующего проекта → 404.""" + r = client.patch("/api/projects/NOPE", json={"autocommit_enabled": True}) + assert r.status_code == 404 + + +def test_patch_project_autocommit_and_execution_mode_together(client): + """PATCH с autocommit_enabled и execution_mode → оба поля обновлены.""" + r = client.patch("/api/projects/p1", json={ + "autocommit_enabled": True, + "execution_mode": "auto_complete", + }) + assert r.status_code == 200 + data = r.json() + assert data["autocommit_enabled"] == 1 + assert data["execution_mode"] == "auto_complete" + + +def test_patch_project_returns_full_project_object(client): + """PATCH возвращает полный объект проекта с id, name и autocommit_enabled.""" + r = client.patch("/api/projects/p1", json={"autocommit_enabled": True}) + assert r.status_code == 200 + data = r.json() + assert data["id"] == "p1" + assert data["name"] == "P1" + assert "autocommit_enabled" in data + +# --------------------------------------------------------------------------- +# KIN-008 — PATCH priority и route_type задачи +# --------------------------------------------------------------------------- + +def 
test_patch_task_priority(client): + """PATCH priority задачи обновляет поле и возвращает задачу.""" + r = client.patch("/api/tasks/P1-001", json={"priority": 3}) + assert r.status_code == 200 + assert r.json()["priority"] == 3 + + +def test_patch_task_priority_persisted(client): + """После PATCH priority повторный GET возвращает новое значение.""" + client.patch("/api/tasks/P1-001", json={"priority": 7}) + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + assert r.json()["priority"] == 7 + + +def test_patch_task_priority_invalid_zero(client): + """PATCH с priority=0 → 400.""" + r = client.patch("/api/tasks/P1-001", json={"priority": 0}) + assert r.status_code == 400 + + +def test_patch_task_priority_invalid_eleven(client): + """PATCH с priority=11 → 400.""" + r = client.patch("/api/tasks/P1-001", json={"priority": 11}) + assert r.status_code == 400 + + +def test_patch_task_route_type_set(client): + """PATCH route_type сохраняет значение в brief.""" + r = client.patch("/api/tasks/P1-001", json={"route_type": "feature"}) + assert r.status_code == 200 + data = r.json() + assert data["brief"]["route_type"] == "feature" + + +def test_patch_task_route_type_all_valid(client): + """Все допустимые route_type принимаются.""" + for rt in ("debug", "feature", "refactor", "hotfix"): + r = client.patch("/api/tasks/P1-001", json={"route_type": rt}) + assert r.status_code == 200, f"route_type={rt} rejected" + assert r.json()["brief"]["route_type"] == rt + + +def test_patch_task_route_type_invalid(client): + """Недопустимый route_type → 400.""" + r = client.patch("/api/tasks/P1-001", json={"route_type": "unknown"}) + assert r.status_code == 400 + + +def test_patch_task_route_type_clear(client): + """PATCH route_type='' очищает поле из brief.""" + client.patch("/api/tasks/P1-001", json={"route_type": "debug"}) + r = client.patch("/api/tasks/P1-001", json={"route_type": ""}) + assert r.status_code == 200 + data = r.json() + brief = data.get("brief") + if brief: + 
assert "route_type" not in brief + + +def test_patch_task_route_type_merges_brief(client): + """route_type сохраняется вместе с другими полями brief без перезаписи.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", brief={"extra": "data"}) + conn.close() + + r = client.patch("/api/tasks/P1-001", json={"route_type": "hotfix"}) + assert r.status_code == 200 + brief = r.json()["brief"] + assert brief["route_type"] == "hotfix" + assert brief["extra"] == "data" + + +def test_patch_task_priority_and_route_type_together(client): + """PATCH может обновить priority и route_type одновременно.""" + r = client.patch("/api/tasks/P1-001", json={"priority": 2, "route_type": "refactor"}) + assert r.status_code == 200 + data = r.json() + assert data["priority"] == 2 + assert data["brief"]["route_type"] == "refactor" + + +def test_patch_task_empty_body_still_returns_400(client): + """Пустое тело по-прежнему возвращает 400 (регрессия KIN-008).""" + r = client.patch("/api/tasks/P1-001", json={}) + assert r.status_code == 400 + + +# PATCH /api/tasks/{id} — редактирование title и brief_text (KIN-015) + +def test_patch_task_title(client): + """PATCH title обновляет заголовок задачи.""" + r = client.patch("/api/tasks/P1-001", json={"title": "Новый заголовок"}) + assert r.status_code == 200 + assert r.json()["title"] == "Новый заголовок" + + +def test_patch_task_title_persisted(client): + """PATCH title сохраняется в БД.""" + client.patch("/api/tasks/P1-001", json={"title": "Персистентный заголовок"}) + r = client.get("/api/tasks/P1-001") + assert r.json()["title"] == "Персистентный заголовок" + + +def test_patch_task_title_empty_returns_400(client): + """Пустой title → 400.""" + r = client.patch("/api/tasks/P1-001", json={"title": " "}) + assert r.status_code == 400 + + +def test_patch_task_brief_text(client): + """PATCH brief_text сохраняется в brief.text.""" + r = client.patch("/api/tasks/P1-001", 
json={"brief_text": "Описание задачи"}) + assert r.status_code == 200 + assert r.json()["brief"]["text"] == "Описание задачи" + + +def test_patch_task_brief_text_persisted(client): + """PATCH brief_text сохраняется в БД.""" + client.patch("/api/tasks/P1-001", json={"brief_text": "Сохранённое описание"}) + r = client.get("/api/tasks/P1-001") + assert r.json()["brief"]["text"] == "Сохранённое описание" + + +def test_patch_task_brief_text_merges_route_type(client): + """brief_text не перезаписывает route_type в brief.""" + client.patch("/api/tasks/P1-001", json={"route_type": "feature"}) + client.patch("/api/tasks/P1-001", json={"brief_text": "Описание"}) + r = client.get("/api/tasks/P1-001") + brief = r.json()["brief"] + assert brief["text"] == "Описание" + assert brief["route_type"] == "feature" + + +def test_patch_task_title_and_brief_text_together(client): + """PATCH может обновить title и brief_text одновременно.""" + r = client.patch("/api/tasks/P1-001", json={"title": "Совместное", "brief_text": "и описание"}) + assert r.status_code == 200 + data = r.json() + assert data["title"] == "Совместное" + assert data["brief"]["text"] == "и описание" + + +# --------------------------------------------------------------------------- +# KIN-049 — Deploy: миграция, PATCH deploy_command, POST /deploy +# --------------------------------------------------------------------------- + +def test_deploy_command_column_exists_in_schema(client): + """Миграция: PRAGMA table_info(projects) подтверждает наличие deploy_command (decision #74).""" + from core.db import init_db + conn = init_db(api_module.DB_PATH) + cols = {row[1] for row in conn.execute("PRAGMA table_info(projects)").fetchall()} + conn.close() + assert "deploy_command" in cols + + +def test_patch_project_deploy_command_persisted_via_sql(client): + """PATCH с deploy_command сохраняется в БД — прямой SQL (decision #55).""" + client.patch("/api/projects/p1", json={"deploy_command": "echo hello"}) + + from core.db import 
init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT deploy_command FROM projects WHERE id = 'p1'").fetchone() + conn.close() + assert row is not None + assert row[0] == "echo hello" + + +def test_patch_project_deploy_command_returned_in_response(client): + """После PATCH ответ содержит обновлённый deploy_command.""" + r = client.patch("/api/projects/p1", json={"deploy_command": "git push origin main"}) + assert r.status_code == 200 + assert r.json()["deploy_command"] == "git push origin main" + + +def test_patch_project_deploy_command_empty_string_clears_to_null(client): + """PATCH с пустой строкой очищает deploy_command → NULL (decision #68).""" + client.patch("/api/projects/p1", json={"deploy_command": "echo hello"}) + client.patch("/api/projects/p1", json={"deploy_command": ""}) + + from core.db import init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT deploy_command FROM projects WHERE id = 'p1'").fetchone() + conn.close() + assert row[0] is None + + +def test_deploy_project_executes_command_returns_stdout(client): + """POST /deploy — команда echo → stdout присутствует в ответе.""" + from unittest.mock import patch, MagicMock + + client.patch("/api/projects/p1", json={"deploy_command": "echo deployed"}) + + mock_result = MagicMock() + mock_result.returncode = 0 + mock_result.stdout = "deployed\n" + mock_result.stderr = "" + + with patch("web.api.subprocess.run", return_value=mock_result): + r = client.post("/api/projects/p1/deploy") + + assert r.status_code == 200 + data = r.json() + assert data["success"] is True + assert data["exit_code"] == 0 + assert "deployed" in data["stdout"] + assert "duration_seconds" in data + + +def test_deploy_project_without_deploy_command_returns_400(client): + """POST /deploy для проекта без deploy_command → 400.""" + r = client.post("/api/projects/p1/deploy") + assert r.status_code == 400 + + +def test_deploy_project_not_found_returns_404(client): + """POST /deploy для несуществующего 
проекта → 404.""" + r = client.post("/api/projects/NOPE/deploy") + assert r.status_code == 404 + + +def test_deploy_project_failed_command_returns_success_false(client): + """POST /deploy — ненулевой exit_code → success=False (команда выполнилась, но упала).""" + from unittest.mock import patch, MagicMock + + client.patch("/api/projects/p1", json={"deploy_command": "exit 1"}) + + mock_result = MagicMock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_result.stderr = "error occurred" + + with patch("web.api.subprocess.run", return_value=mock_result): + r = client.post("/api/projects/p1/deploy") + + assert r.status_code == 200 + data = r.json() + assert data["success"] is False + assert data["exit_code"] == 1 + assert "error occurred" in data["stderr"] + + +def test_deploy_project_timeout_returns_504(client): + """POST /deploy — timeout → 504.""" + from unittest.mock import patch + import subprocess + + client.patch("/api/projects/p1", json={"deploy_command": "sleep 100"}) + + with patch("web.api.subprocess.run", side_effect=subprocess.TimeoutExpired("sleep 100", 60)): + r = client.post("/api/projects/p1/deploy") + + assert r.status_code == 504 + + +def test_task_full_includes_project_deploy_command(client): + """GET /api/tasks/{id}/full включает project_deploy_command из таблицы projects.""" + client.patch("/api/projects/p1", json={"deploy_command": "git push"}) + + r = client.get("/api/tasks/P1-001/full") + assert r.status_code == 200 + data = r.json() + assert "project_deploy_command" in data + assert data["project_deploy_command"] == "git push" + + +def test_task_full_project_deploy_command_none_when_not_set(client): + """GET /api/tasks/{id}/full возвращает project_deploy_command=None когда не задана.""" + r = client.get("/api/tasks/P1-001/full") + assert r.status_code == 200 + data = r.json() + assert "project_deploy_command" in data + assert data["project_deploy_command"] is None + + +# 
--------------------------------------------------------------------------- +# KIN-067 — PATCH obsidian_vault_path + sync/obsidian не возвращает 400 +# --------------------------------------------------------------------------- + +def test_patch_project_obsidian_vault_path_persisted_via_sql(client): + """PATCH с obsidian_vault_path сохраняется в БД — прямой SQL.""" + r = client.patch("/api/projects/p1", json={"obsidian_vault_path": "/tmp/vault"}) + assert r.status_code == 200 + + from core.db import init_db + conn = init_db(api_module.DB_PATH) + row = conn.execute("SELECT obsidian_vault_path FROM projects WHERE id = 'p1'").fetchone() + conn.close() + assert row is not None + assert row[0] == "/tmp/vault" + + +def test_patch_project_obsidian_vault_path_returned_in_response(client): + """PATCH возвращает обновлённый obsidian_vault_path в ответе.""" + r = client.patch("/api/projects/p1", json={"obsidian_vault_path": "/my/vault"}) + assert r.status_code == 200 + assert r.json()["obsidian_vault_path"] == "/my/vault" + + +def test_sync_obsidian_without_vault_path_returns_400(client): + """POST sync/obsidian без сохранённого vault_path → 400 Bad Request.""" + r = client.post("/api/projects/p1/sync/obsidian") + assert r.status_code == 400 + + +def test_sync_obsidian_after_patch_vault_path_not_400(client, tmp_path): + """Сценарий бага KIN-067: сначала PATCH vault_path, затем sync → не 400. + + Раньше runSync() вызывал sync/obsidian без предварительного сохранения пути, + что приводило к 400. После фикса PATCH вызывается первым. 
+ """ + vault = tmp_path / "vault" + vault.mkdir() + + # Шаг 1: сохранить vault_path через PATCH (как теперь делает runSync) + r = client.patch("/api/projects/p1", json={"obsidian_vault_path": str(vault)}) + assert r.status_code == 200 + + # Шаг 2: запустить синхронизацию — не должно вернуть 400 + r = client.post("/api/projects/p1/sync/obsidian") + assert r.status_code != 400, f"Ожидался не 400, получен {r.status_code}: {r.text}" + assert r.status_code == 200 + + +def test_sync_obsidian_after_patch_returns_sync_result_fields(client, tmp_path): + """После PATCH vault_path синхронизация возвращает поля exported_decisions и tasks_updated.""" + vault = tmp_path / "vault" + vault.mkdir() + + client.patch("/api/projects/p1", json={"obsidian_vault_path": str(vault)}) + r = client.post("/api/projects/p1/sync/obsidian") + assert r.status_code == 200 + data = r.json() + assert "exported_decisions" in data + + +# --------------------------------------------------------------------------- +# KIN-016 — GET /api/notifications — эскалации от заблокированных агентов +# --------------------------------------------------------------------------- + +def test_kin016_notifications_empty_when_no_blocked_tasks(client): + """KIN-016: GET /api/notifications возвращает [] когда нет заблокированных задач.""" + r = client.get("/api/notifications") + assert r.status_code == 200 + assert r.json() == [] + + +def test_kin016_notifications_returns_blocked_task_as_escalation(client): + """KIN-016: заблокированная задача появляется в /api/notifications с корректными полями.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task( + conn, "P1-001", + status="blocked", + blocked_reason="cannot access external API", + blocked_at="2026-03-16T10:00:00", + blocked_agent_role="debugger", + blocked_pipeline_step="1", + ) + conn.close() + + r = client.get("/api/notifications") + assert r.status_code == 200 + items = r.json() + assert len(items) == 
1 + + item = items[0] + assert item["task_id"] == "P1-001" + assert item["agent_role"] == "debugger" + assert item["reason"] == "cannot access external API" + assert item["pipeline_step"] == "1" + assert item["blocked_at"] == "2026-03-16T10:00:00" + + +def test_kin016_notifications_contains_project_id_and_title(client): + """KIN-016: уведомление содержит project_id и title задачи.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="blocked", + blocked_reason="out of scope", + blocked_agent_role="architect") + conn.close() + + r = client.get("/api/notifications") + assert r.status_code == 200 + item = r.json()[0] + assert item["project_id"] == "p1" + assert item["title"] == "Fix bug" + + +def test_kin016_notifications_filters_by_project_id(client): + """KIN-016: ?project_id= фильтрует уведомления по проекту.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + # Создаём второй проект с заблокированной задачей + models.create_project(conn, "p2", "P2", "/p2") + models.create_task(conn, "P2-001", "p2", "Another task") + models.update_task(conn, "P1-001", status="blocked", + blocked_reason="reason A", blocked_agent_role="debugger") + models.update_task(conn, "P2-001", status="blocked", + blocked_reason="reason B", blocked_agent_role="tester") + conn.close() + + r = client.get("/api/notifications?project_id=p1") + assert r.status_code == 200 + items = r.json() + assert all(i["project_id"] == "p1" for i in items) + assert len(items) == 1 + assert items[0]["task_id"] == "P1-001" + + +def test_kin016_notifications_only_returns_blocked_status(client): + """KIN-016: задачи в статусе pending/review/done НЕ попадают в уведомления.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + # Задача остаётся в pending (дефолт) + assert models.get_task(conn, "P1-001")["status"] == "pending" + conn.close() + + r 
= client.get("/api/notifications") + assert r.status_code == 200 + assert r.json() == [] + + +def test_kin016_pipeline_blocked_agent_stops_next_steps_integration(client): + """KIN-016: после blocked пайплайна задача блокируется, /api/notifications показывает её. + + Интеграционный тест: pipeline → blocked → /api/notifications содержит task. + """ + import json + from unittest.mock import patch, MagicMock + + blocked_output = json.dumps({ + "result": json.dumps({"status": "blocked", "reason": "no repo access"}), + }) + mock_proc = MagicMock() + mock_proc.pid = 123 + + with patch("web.api.subprocess.Popen") as mock_popen: + mock_popen.return_value = mock_proc + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + # Вручную помечаем задачу blocked (имитируем результат пайплайна) + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task( + conn, "P1-001", + status="blocked", + blocked_reason="no repo access", + blocked_agent_role="debugger", + blocked_pipeline_step="1", + ) + conn.close() + + r = client.get("/api/notifications") + assert r.status_code == 200 + items = r.json() + assert len(items) == 1 + assert items[0]["task_id"] == "P1-001" + assert items[0]["reason"] == "no repo access" + assert items[0]["agent_role"] == "debugger" + + +# --------------------------------------------------------------------------- +# KIN-BIZ-001 — telegram_sent из БД (не заглушка) +# --------------------------------------------------------------------------- + +def test_kin_biz_001_telegram_sent_not_stub(client): + """Регрессия KIN-BIZ-001: /api/notifications возвращает реальный telegram_sent из БД, не False-заглушку.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task( + conn, "P1-001", + status="blocked", + blocked_reason="cannot access repo", + blocked_agent_role="debugger", + ) + models.mark_telegram_sent(conn, "P1-001") + conn.close() + + 
r = client.get("/api/notifications") + assert r.status_code == 200 + items = r.json() + assert len(items) == 1 + # Ключевая проверка: telegram_sent должен быть True из БД, не False-заглушка + assert items[0]["telegram_sent"] is True + + +def test_kin_biz_001_notifications_telegram_sent_false_when_not_sent(client): + """KIN-BIZ-001: telegram_sent=False для задачи, где уведомление не отправлялось.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task( + conn, "P1-001", + status="blocked", + blocked_reason="no access", + blocked_agent_role="tester", + ) + # Не вызываем mark_telegram_sent → telegram_sent остаётся 0 + conn.close() + + r = client.get("/api/notifications") + assert r.status_code == 200 + items = r.json() + assert len(items) == 1 + assert items[0]["telegram_sent"] is False + + +def test_kin_biz_001_telegram_sent_distinguishes_sent_and_not_sent(client): + """KIN-BIZ-001: список уведомлений корректно различает sent/not-sent задачи.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.create_task(conn, "P1-002", "p1", "Another task") + models.update_task( + conn, "P1-001", + status="blocked", + blocked_reason="reason 1", + blocked_agent_role="debugger", + ) + models.update_task( + conn, "P1-002", + status="blocked", + blocked_reason="reason 2", + blocked_agent_role="tester", + ) + # Telegram отправлен только для P1-001 + models.mark_telegram_sent(conn, "P1-001") + conn.close() + + r = client.get("/api/notifications") + assert r.status_code == 200 + items = r.json() + assert len(items) == 2 + by_id = {item["task_id"]: item for item in items} + assert by_id["P1-001"]["telegram_sent"] is True + assert by_id["P1-002"]["telegram_sent"] is False + + +# --------------------------------------------------------------------------- +# KIN-071: project_type и SSH-поля в API +# --------------------------------------------------------------------------- + +def 
test_create_operations_project_with_ssh_fields(client): + """KIN-071: POST /api/projects с project_type=operations и SSH-полями возвращает 200.""" + r = client.post("/api/projects", json={ + "id": "srv1", + "name": "My Server", + "project_type": "operations", + "ssh_host": "10.0.0.1", + "ssh_user": "root", + "ssh_key_path": "~/.ssh/id_rsa", + "ssh_proxy_jump": "jumpt", + }) + assert r.status_code == 200 + data = r.json() + assert data["project_type"] == "operations" + assert data["path"] is None + assert data["ssh_host"] == "10.0.0.1" + assert data["ssh_user"] == "root" + assert data["ssh_key_path"] == "~/.ssh/id_rsa" + assert data["ssh_proxy_jump"] == "jumpt" + + +def test_create_project_invalid_type_returns_400(client): + """KIN-071: POST /api/projects с недопустимым project_type → 400.""" + r = client.post("/api/projects", json={ + "id": "bad", + "name": "Bad", + "path": "/bad", + "project_type": "legacy", + }) + assert r.status_code == 400 + + +def test_patch_project_invalid_type_returns_400(client): + """KIN-071: PATCH /api/projects/{id} с недопустимым project_type → 400.""" + r = client.patch("/api/projects/p1", json={"project_type": "invalid_type"}) + assert r.status_code == 400 + + +def test_create_operations_project_without_ssh_host_allowed(client): + """Регрессионный тест KIN-ARCH-001: воспроизводит СЛОМАННОЕ поведение до фикса. + + До фикса: POST operations-проекта без ssh_host возвращал 200. + После фикса: должен возвращать 422 (Pydantic model_validator). + Этот тест НАМЕРЕННО проверяет, что старое поведение больше не существует. 
+ """ + r = client.post("/api/projects", json={ + "id": "srv2", + "name": "Server No SSH", + "project_type": "operations", + }) + # Фикс KIN-ARCH-001: был 200, стал 422 + assert r.status_code == 422, ( + "Регрессия KIN-ARCH-001: POST operations-проекта без ssh_host " + "должен возвращать 422, а не 200" + ) + + +# --------------------------------------------------------------------------- +# KIN-ARCH-001 — серверная валидация ssh_host для operations-проектов +# --------------------------------------------------------------------------- + +def test_kin_arch_001_operations_without_ssh_host_returns_422(client): + """Регрессионный тест KIN-ARCH-001: POST /api/projects с project_type='operations' + и без ssh_host → 422 Unprocessable Entity.""" + r = client.post("/api/projects", json={ + "id": "ops_no_ssh", + "name": "Ops Without SSH", + "project_type": "operations", + }) + assert r.status_code == 422 + + +def test_kin_arch_001_operations_with_empty_ssh_host_returns_422(client): + """Регрессионный тест KIN-ARCH-001: пустая строка в ssh_host считается отсутствующим + значением → 422.""" + r = client.post("/api/projects", json={ + "id": "ops_empty_ssh", + "name": "Ops Empty SSH", + "project_type": "operations", + "ssh_host": "", + }) + assert r.status_code == 422 + + +def test_kin_arch_001_operations_with_valid_ssh_host_returns_200(client): + """Регрессионный тест KIN-ARCH-001: POST /api/projects с project_type='operations' + и корректным ssh_host → 200, проект создаётся.""" + r = client.post("/api/projects", json={ + "id": "ops_with_ssh", + "name": "Ops With SSH", + "project_type": "operations", + "ssh_host": "10.0.0.42", + }) + assert r.status_code == 200 + data = r.json() + assert data["project_type"] == "operations" + assert data["path"] is None + assert data["ssh_host"] == "10.0.0.42" + + +def test_kin_arch_001_development_without_ssh_host_allowed(client): + """Регрессионный тест KIN-ARCH-001: project_type='development' без ssh_host + должен создаваться без ошибок — 
валидатор срабатывает только для operations.""" + r = client.post("/api/projects", json={ + "id": "dev_no_ssh", + "name": "Dev No SSH", + "path": "/dev", + "project_type": "development", + }) + assert r.status_code == 200 + assert r.json()["project_type"] == "development" + + +def test_kin_arch_001_research_without_ssh_host_allowed(client): + """Регрессионный тест KIN-ARCH-001: project_type='research' без ssh_host + должен создаваться без ошибок.""" + r = client.post("/api/projects", json={ + "id": "res_no_ssh", + "name": "Research No SSH", + "path": "/research", + "project_type": "research", + }) + assert r.status_code == 200 + assert r.json()["project_type"] == "research" + + +def test_kin_arch_001_422_error_message_mentions_ssh_host(client): + """Регрессионный тест KIN-ARCH-001: тело 422-ответа содержит сообщение об ошибке + с упоминанием ssh_host.""" + r = client.post("/api/projects", json={ + "id": "ops_err_msg", + "name": "Check Error Message", + "project_type": "operations", + }) + assert r.status_code == 422 + body = r.json() + # Pydantic возвращает detail со списком ошибок + detail_str = str(body) + assert "ssh_host" in detail_str + + +def test_create_research_project_type_accepted(client): + """KIN-071: project_type=research принимается API.""" + r = client.post("/api/projects", json={ + "id": "res1", + "name": "Research Project", + "path": "/research", + "project_type": "research", + }) + assert r.status_code == 200 + assert r.json()["project_type"] == "research" + + +# --------------------------------------------------------------------------- +# KIN-ARCH-003 — path nullable для operations-проектов +# Исправляет баг: workaround с пустой строкой ("") для operations-проектов +# --------------------------------------------------------------------------- + +def test_kin_arch_003_operations_project_without_path_returns_200(client): + """KIN-ARCH-003: POST /api/projects с project_type='operations' без path → 200. 
+ + До фикса: path="" передавался как workaround для NOT NULL constraint. + После фикса: path не передаётся вовсе, сохраняется как NULL. + """ + r = client.post("/api/projects", json={ + "id": "ops_null_path", + "name": "Ops Null Path", + "project_type": "operations", + "ssh_host": "10.0.0.1", + }) + assert r.status_code == 200 + data = r.json() + assert data["path"] is None, ( + "KIN-ARCH-003 регрессия: path должен быть NULL, а не пустой строкой" + ) + + +def test_kin_arch_003_development_project_without_path_returns_422(client): + """KIN-ARCH-003: POST /api/projects с project_type='development' без path → 422. + + Pydantic validate_fields: path обязателен для non-operations проектов. + """ + r = client.post("/api/projects", json={ + "id": "dev_no_path", + "name": "Dev No Path", + "project_type": "development", + }) + assert r.status_code == 422 + + +def test_kin_arch_003_development_without_path_error_mentions_path(client): + """KIN-ARCH-003: тело 422-ответа содержит упоминание об обязательности path.""" + r = client.post("/api/projects", json={ + "id": "dev_no_path_msg", + "name": "Dev No Path Msg", + "project_type": "development", + }) + assert r.status_code == 422 + detail_str = str(r.json()) + assert "path" in detail_str + + +def test_kin_arch_003_deploy_operations_project_null_path_uses_cwd_none(client): + """KIN-ARCH-003: deploy_project для operations-проекта с path=NULL + не вызывает Path.exists() — передаёт cwd=None в subprocess.run.""" + from unittest.mock import patch, MagicMock + client.post("/api/projects", json={ + "id": "ops_deploy_null", + "name": "Ops Deploy Null Path", + "project_type": "operations", + "ssh_host": "10.0.0.1", + }) + client.patch("/api/projects/ops_deploy_null", json={"deploy_command": "echo ok"}) + + mock_result = MagicMock() + mock_result.returncode = 0 + mock_result.stdout = "ok\n" + mock_result.stderr = "" + + with patch("subprocess.run", return_value=mock_result) as mock_run: + r = 
client.post("/api/projects/ops_deploy_null/deploy") + + assert r.status_code == 200 + call_kwargs = mock_run.call_args.kwargs + assert call_kwargs.get("cwd") is None, ( + "KIN-ARCH-003: для operations-проектов без path, cwd должен быть None" + ) + + +# --------------------------------------------------------------------------- +# Bootstrap endpoint — KIN-081 +# --------------------------------------------------------------------------- + +@pytest.fixture +def bootstrap_client(tmp_path): + """TestClient без seed-данных, с отдельным DB_PATH.""" + db_path = tmp_path / "bs_test.db" + api_module.DB_PATH = db_path + from web.api import app + return TestClient(app), tmp_path + + +def test_bootstrap_endpoint_invalid_path_returns_400(bootstrap_client): + """KIN-081: bootstrap возвращает 400 если путь не существует.""" + client, _ = bootstrap_client + r = client.post("/api/bootstrap", json={ + "id": "newproj", "name": "New Project", "path": "/nonexistent/path/that/does/not/exist" + }) + assert r.status_code == 400 + assert "not a directory" in r.json()["detail"].lower() + + +def test_bootstrap_endpoint_duplicate_id_returns_409(bootstrap_client, tmp_path): + """KIN-081: bootstrap возвращает 409 если проект с таким ID уже существует.""" + client, _ = bootstrap_client + proj_dir = tmp_path / "myproj" + proj_dir.mkdir() + # Create project first + client.post("/api/projects", json={"id": "existing", "name": "Existing", "path": str(proj_dir)}) + # Try bootstrap with same ID + r = client.post("/api/bootstrap", json={ + "id": "existing", "name": "Same ID", "path": str(proj_dir) + }) + assert r.status_code == 409 + assert "already exists" in r.json()["detail"] + + +def test_bootstrap_endpoint_rollback_on_save_error(bootstrap_client, tmp_path): + """KIN-081: при ошибке в save_to_db проект удаляется (rollback), возвращается 500.""" + client, _ = bootstrap_client + proj_dir = tmp_path / "rollbackproj" + proj_dir.mkdir() + + from core.db import init_db + from core import models as 
_models + + def _save_create_then_fail(conn, project_id, name, path, *args, **kwargs): + # Simulate partial write: project row created, then error + _models.create_project(conn, project_id, name, path) + raise RuntimeError("simulated DB error after project created") + + with patch("web.api.save_to_db", side_effect=_save_create_then_fail): + r = client.post("/api/bootstrap", json={ + "id": "rollbackproj", "name": "Rollback Test", "path": str(proj_dir) + }) + + assert r.status_code == 500 + assert "Bootstrap failed" in r.json()["detail"] + + # Project must NOT remain in DB (rollback was executed) + conn = init_db(api_module.DB_PATH) + assert _models.get_project(conn, "rollbackproj") is None + conn.close() + + +def test_bootstrap_endpoint_success(bootstrap_client, tmp_path): + """KIN-081: успешный bootstrap возвращает 200 с project и counts.""" + client, _ = bootstrap_client + proj_dir = tmp_path / "goodproj" + proj_dir.mkdir() + (proj_dir / "requirements.txt").write_text("fastapi\n") + + with patch("web.api.find_vault_root", return_value=None): + r = client.post("/api/bootstrap", json={ + "id": "goodproj", "name": "Good Project", "path": str(proj_dir) + }) + + assert r.status_code == 200 + data = r.json() + assert data["project"]["id"] == "goodproj" + assert "modules_count" in data + assert "decisions_count" in data + + +def test_delete_project_ok(client): + # Create a separate project to delete + r = client.post("/api/projects", json={"id": "del1", "name": "Del1", "path": "/del1"}) + assert r.status_code == 200 + + r = client.delete("/api/projects/del1") + assert r.status_code == 204 + assert r.content == b"" + + # Verify project is gone + r = client.get("/api/projects/del1") + assert r.status_code == 404 + + +def test_delete_project_not_found(client): + r = client.delete("/api/projects/99999") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# Environments (KIN-087) +# 
--------------------------------------------------------------------------- + +def test_create_environment(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "prod", + "host": "10.0.0.1", + "username": "pelmen", + "port": 22, + "auth_type": "password", + "auth_value": "s3cr3t", + "is_installed": False, + }) + assert r.status_code == 201 + data = r.json() + assert data["name"] == "prod" + assert data["host"] == "10.0.0.1" + assert data["username"] == "pelmen" + # auth_value must be hidden in responses + assert data.get("auth_value") is None + assert "scan_task_id" not in data + + +def test_create_environment_project_not_found(client): + r = client.post("/api/projects/nope/environments", json={ + "name": "prod", + "host": "10.0.0.1", + "username": "root", + }) + assert r.status_code == 404 + + +def test_create_environment_invalid_auth_type(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "prod", + "host": "10.0.0.1", + "username": "root", + "auth_type": "oauth", + }) + assert r.status_code == 422 + + +def test_create_environment_invalid_port(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "prod", + "host": "10.0.0.1", + "username": "root", + "port": 99999, + }) + assert r.status_code == 422 + + +def test_create_environment_triggers_scan_when_installed(client): + """is_installed=True на POST должен создать задачу sysadmin и вернуть scan_task_id.""" + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=12345) + r = client.post("/api/projects/p1/environments", json={ + "name": "prod", + "host": "10.0.0.2", + "username": "pelmen", + "is_installed": True, + }) + assert r.status_code == 201 + data = r.json() + assert "scan_task_id" in data + task_id = data["scan_task_id"] + # Verify the task exists with sysadmin role + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + task = m.get_task(conn, task_id) + 
conn.close() + assert task is not None + assert task["assigned_role"] == "sysadmin" + assert task["category"] == "INFRA" + + +def test_list_environments(client): + client.post("/api/projects/p1/environments", json={ + "name": "dev", "host": "10.0.0.10", "username": "dev", + }) + client.post("/api/projects/p1/environments", json={ + "name": "prod", "host": "10.0.0.11", "username": "prod", + }) + r = client.get("/api/projects/p1/environments") + assert r.status_code == 200 + data = r.json() + assert len(data) == 2 + names = {e["name"] for e in data} + assert names == {"dev", "prod"} + + +def test_list_environments_project_not_found(client): + r = client.get("/api/projects/nope/environments") + assert r.status_code == 404 + + +def test_patch_environment(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "dev", "host": "10.0.0.20", "username": "root", + }) + env_id = r.json()["id"] + + r = client.patch(f"/api/projects/p1/environments/{env_id}", json={ + "host": "10.0.0.99", + }) + assert r.status_code == 200 + assert r.json()["host"] == "10.0.0.99" + + +def test_patch_environment_triggers_scan_on_false_to_true(client): + """PATCH is_installed false→true должен запустить скан.""" + r = client.post("/api/projects/p1/environments", json={ + "name": "staging", "host": "10.0.0.30", "username": "root", "is_installed": False, + }) + env_id = r.json()["id"] + + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=22222) + r = client.patch(f"/api/projects/p1/environments/{env_id}", json={ + "is_installed": True, + }) + assert r.status_code == 200 + assert "scan_task_id" in r.json() + + +def test_patch_environment_no_duplicate_scan(client): + """Повторный PATCH is_installed=True (true→true) не создаёт новую задачу.""" + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=33333) + r = client.post("/api/projects/p1/environments", json={ + "name": "prod2", "host": "10.0.0.40", 
"username": "root", "is_installed": True, + }) + first_task_id = r.json().get("scan_task_id") + assert first_task_id is not None + env_id = r.json()["id"] + + # Second PATCH with host change — was_installed=True, so no scan triggered + with patch("subprocess.Popen") as mock_popen2: + mock_popen2.return_value = MagicMock(pid=44444) + r2 = client.patch(f"/api/projects/p1/environments/{env_id}", json={ + "host": "10.0.0.41", + }) + assert r2.status_code == 200 + assert "scan_task_id" not in r2.json() + + +def test_patch_environment_nothing_to_update(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "dev", "host": "10.0.0.50", "username": "root", + }) + env_id = r.json()["id"] + r = client.patch(f"/api/projects/p1/environments/{env_id}", json={}) + assert r.status_code == 400 + + +def test_patch_environment_not_found(client): + r = client.patch("/api/projects/p1/environments/99999", json={"host": "1.2.3.4"}) + assert r.status_code == 404 + + +def test_delete_environment(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "dev", "host": "10.0.0.60", "username": "root", + }) + env_id = r.json()["id"] + + r = client.delete(f"/api/projects/p1/environments/{env_id}") + assert r.status_code == 204 + + # Verify gone + r = client.get("/api/projects/p1/environments") + ids = [e["id"] for e in r.json()] + assert env_id not in ids + + +def test_delete_environment_not_found(client): + r = client.delete("/api/projects/p1/environments/99999") + assert r.status_code == 404 + + +def test_scan_environment(client): + r = client.post("/api/projects/p1/environments", json={ + "name": "prod", "host": "10.0.0.70", "username": "root", + }) + env_id = r.json()["id"] + + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=55555) + r = client.post(f"/api/projects/p1/environments/{env_id}/scan") + assert r.status_code == 202 + data = r.json() + assert data["status"] == "started" + assert "task_id" in data + + 
+def test_scan_environment_not_found(client): + r = client.post("/api/projects/p1/environments/99999/scan") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# Environments (KIN-087) — дополнительные тесты по acceptance criteria +# --------------------------------------------------------------------------- + +def test_list_environments_auth_value_hidden(): + """GET /environments не должен возвращать auth_value (AC: маскировка).""" + import web.api as api_module2 + from pathlib import Path + import tempfile + with tempfile.TemporaryDirectory() as tmp: + db_path = Path(tmp) / "t.db" + api_module2.DB_PATH = db_path + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "p2", "name": "P2", "path": "/p2"}) + c.post("/api/projects/p2/environments", json={ + "name": "prod", "host": "1.2.3.4", "username": "root", + "auth_type": "password", "auth_value": "supersecret", + }) + r = c.get("/api/projects/p2/environments") + assert r.status_code == 200 + for env in r.json(): + assert env.get("auth_value") is None + + +def test_patch_environment_auth_value_hidden(client): + """PATCH /environments/{id} не должен возвращать auth_value в ответе (AC: маскировка).""" + r = client.post("/api/projects/p1/environments", json={ + "name": "masked", "host": "5.5.5.5", "username": "user", + "auth_value": "topsecret", + }) + env_id = r.json()["id"] + r = client.patch(f"/api/projects/p1/environments/{env_id}", json={"host": "6.6.6.6"}) + assert r.status_code == 200 + assert r.json().get("auth_value") is None + + +def test_is_installed_flag_persisted(client): + """is_installed=True сохраняется и возвращается в GET-списке (AC: чекбокс работает).""" + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=99001) + r = client.post("/api/projects/p1/environments", json={ + "name": "installed_prod", "host": "7.7.7.7", "username": 
"admin", + "is_installed": True, + }) + assert r.status_code == 201 + env_id = r.json()["id"] + + r = client.get("/api/projects/p1/environments") + envs = {e["id"]: e for e in r.json()} + assert bool(envs[env_id]["is_installed"]) is True + + +def test_is_installed_false_not_installed(client): + """is_installed=False по умолчанию сохраняется корректно.""" + r = client.post("/api/projects/p1/environments", json={ + "name": "notinstalled", "host": "8.8.8.8", "username": "ops", + "is_installed": False, + }) + assert r.status_code == 201 + env_id = r.json()["id"] + + r = client.get("/api/projects/p1/environments") + envs = {e["id"]: e for e in r.json()} + assert not bool(envs[env_id]["is_installed"]) + + +def test_sysadmin_scan_task_has_escalation_in_brief(client): + """Задача sysadmin должна содержать инструкцию об эскалации при нехватке данных (AC#4).""" + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=99002) + r = client.post("/api/projects/p1/environments", json={ + "name": "esc_test", "host": "9.9.9.9", "username": "deploy", + "is_installed": True, + }) + task_id = r.json()["scan_task_id"] + + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + task = m.get_task(conn, task_id) + conn.close() + + brief = task["brief"] + assert isinstance(brief, dict), "brief must be a dict" + text = brief.get("text", "") + assert "эскалация" in text.lower(), ( + "Sysadmin task brief must mention escalation to user when data is insufficient" + ) + + +def test_create_environment_key_auth_type(client): + """auth_type='key' должен быть принят и сохранён (AC: ключ SSH).""" + r = client.post("/api/projects/p1/environments", json={ + "name": "ssh_key_env", "host": "10.10.10.10", "username": "git", + "auth_type": "key", "auth_value": "-----BEGIN OPENSSH PRIVATE KEY-----", + }) + assert r.status_code == 201 + data = r.json() + assert data["auth_type"] == "key" + assert data.get("auth_value") is None + + 
+def test_create_environment_duplicate_name_conflict(client): + """Повторное создание среды с тем же именем в проекте → 409 Conflict.""" + client.post("/api/projects/p1/environments", json={ + "name": "unique_env", "host": "11.11.11.11", "username": "root", + }) + r = client.post("/api/projects/p1/environments", json={ + "name": "unique_env", "host": "22.22.22.22", "username": "root", + }) + assert r.status_code == 409 + + +def test_patch_environment_empty_auth_value_preserves_stored(client): + """PATCH с пустым auth_value не стирает сохранённый credential (AC: безопасность).""" + r = client.post("/api/projects/p1/environments", json={ + "name": "cred_safe", "host": "33.33.33.33", "username": "ops", + "auth_value": "original_password", + }) + env_id = r.json()["id"] + + # Patch без auth_value — credential должен сохраниться + r = client.patch(f"/api/projects/p1/environments/{env_id}", json={"host": "44.44.44.44"}) + assert r.status_code == 200 + + # Читаем raw запись из БД (get_environment возвращает obfuscated auth_value) + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + raw = m.get_environment(conn, env_id) + conn.close() + + assert raw["auth_value"] is not None, "Stored credential must be preserved after PATCH without auth_value" + decrypted = m._decrypt_auth(raw["auth_value"]) + assert decrypted == "original_password", "Stored credential must be decryptable and match original" + + +# --------------------------------------------------------------------------- +# KIN-088 — POST /run возвращает 409 если задача уже in_progress +# --------------------------------------------------------------------------- + +def test_run_returns_409_when_task_already_in_progress(client): + """KIN-088: повторный POST /run для задачи со статусом in_progress → 409 с task_already_running.""" + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + m.update_task(conn, "P1-001", 
status="in_progress") + conn.close() + + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 409 + assert r.json()["error"] == "task_already_running" + + +def test_run_409_error_key_is_task_already_running(client): + """KIN-088: тело ответа 409 содержит ключ error='task_already_running'.""" + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + m.update_task(conn, "P1-001", status="in_progress") + conn.close() + + r = client.post("/api/tasks/P1-001/run") + body = r.json() + assert "error" in body + assert body["error"] == "task_already_running" + + +def test_run_second_call_does_not_change_status(client): + """KIN-088: при повторном /run задача остаётся in_progress, статус не сбрасывается.""" + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + m.update_task(conn, "P1-001", status="in_progress") + conn.close() + + client.post("/api/tasks/P1-001/run") # второй вызов — должен вернуть 409 + + r = client.get("/api/tasks/P1-001") + assert r.json()["status"] == "in_progress" + + +def test_run_pending_task_still_returns_202(client): + """KIN-088: задача со статусом pending запускается без ошибки — 202.""" + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + +def test_run_kin085_parallel_different_tasks_not_blocked(client): + """KIN-085: /run для разных задач независимы — in_progress одной не блокирует другую.""" + # Создаём вторую задачу + client.post("/api/tasks", json={"project_id": "p1", "title": "Second task"}) + + # Ставим первую задачу в in_progress + from core.db import init_db + from core import models as m + conn = init_db(api_module.DB_PATH) + m.update_task(conn, "P1-001", status="in_progress") + conn.close() + + # Запуск второй задачи должен быть успешным + r = client.post("/api/tasks/P1-002/run") + assert r.status_code == 202 diff --git a/tests/test_api_attachments.py b/tests/test_api_attachments.py new file mode 100644 index 
0000000..b98af9f --- /dev/null +++ b/tests/test_api_attachments.py @@ -0,0 +1,304 @@ +""" +KIN-090: Integration tests for task attachment API endpoints. + +Tests cover: + AC1 — upload saves file to {project_path}/.kin/attachments/{task_id}/ + AC3 — file available for download via GET /api/attachments/{id}/file + AC4 — data persists in SQLite + Integration: upload → list → verify agent context (build_context) +""" + +import io +import pytest +from pathlib import Path +from fastapi.testclient import TestClient + +import web.api as api_module + + +@pytest.fixture +def client(tmp_path): + """TestClient with isolated DB and a seeded project+task. + + Project path set to tmp_path so attachment dirs are created there + and cleaned up automatically. + """ + db_path = tmp_path / "test.db" + api_module.DB_PATH = db_path + + from web.api import app + c = TestClient(app) + + project_path = str(tmp_path / "myproject") + c.post("/api/projects", json={ + "id": "prj", + "name": "My Project", + "path": project_path, + }) + c.post("/api/tasks", json={"project_id": "prj", "title": "Fix login bug"}) + return c + + +def _png_bytes() -> bytes: + """Minimal valid 1x1 PNG image.""" + import base64 + # 1x1 red pixel PNG (base64-encoded) + data = ( + b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01" + b"\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc\xf8\x0f\x00" + b"\x00\x01\x01\x00\x05\x18\xd8N\x00\x00\x00\x00IEND\xaeB`\x82" + ) + return data + + +# --------------------------------------------------------------------------- +# Upload +# --------------------------------------------------------------------------- + +def test_upload_attachment_returns_201(client): + """KIN-090: POST /api/tasks/{id}/attachments возвращает 201 и данные вложения.""" + r = client.post( + "/api/tasks/PRJ-001/attachments", + files={"file": ("bug.png", io.BytesIO(_png_bytes()), "image/png")}, + ) + assert r.status_code == 201 + data = r.json() + assert data["task_id"] == "PRJ-001" + 
assert data["filename"] == "bug.png" + assert data["mime_type"] == "image/png" + assert data["size"] == len(_png_bytes()) + assert data["id"] is not None + + +def test_upload_attachment_saves_file_to_correct_path(client, tmp_path): + """KIN-090: AC1 — файл сохраняется в {project_path}/.kin/attachments/{task_id}/.""" + r = client.post( + "/api/tasks/PRJ-001/attachments", + files={"file": ("shot.png", io.BytesIO(_png_bytes()), "image/png")}, + ) + assert r.status_code == 201 + saved_path = Path(r.json()["path"]) + + # Path structure: /.kin/attachments/PRJ-001/shot.png + assert saved_path.name == "shot.png" + assert saved_path.parent.name == "PRJ-001" + assert saved_path.parent.parent.name == "attachments" + assert saved_path.parent.parent.parent.name == ".kin" + assert saved_path.exists() + + +def test_upload_attachment_file_content_matches(client): + """KIN-090: содержимое сохранённого файла совпадает с загруженным.""" + content = _png_bytes() + r = client.post( + "/api/tasks/PRJ-001/attachments", + files={"file": ("img.png", io.BytesIO(content), "image/png")}, + ) + assert r.status_code == 201 + saved_path = Path(r.json()["path"]) + assert saved_path.read_bytes() == content + + +def test_upload_attachment_persists_in_sqlite(client, tmp_path): + """KIN-090: AC4 — запись о вложении сохраняется в SQLite и доступна через list.""" + client.post( + "/api/tasks/PRJ-001/attachments", + files={"file": ("db_test.png", io.BytesIO(_png_bytes()), "image/png")}, + ) + # Verify via list endpoint (reads from DB) + r = client.get("/api/tasks/PRJ-001/attachments") + assert r.status_code == 200 + assert any(a["filename"] == "db_test.png" for a in r.json()) + + +def test_upload_attachment_task_not_found_returns_404(client): + """KIN-090: 404 если задача не существует.""" + r = client.post( + "/api/tasks/PRJ-999/attachments", + files={"file": ("x.png", io.BytesIO(_png_bytes()), "image/png")}, + ) + assert r.status_code == 404 + + +def 
test_upload_attachment_operations_project_returns_400(client, tmp_path): + """KIN-090: 400 для operations-проекта (нет project path).""" + db_path = tmp_path / "test2.db" + api_module.DB_PATH = db_path + + from web.api import app + c = TestClient(app) + c.post("/api/projects", json={ + "id": "ops", + "name": "Ops Server", + "project_type": "operations", + "ssh_host": "10.0.0.1", + }) + c.post("/api/tasks", json={"project_id": "ops", "title": "Reboot server"}) + + r = c.post( + "/api/tasks/OPS-001/attachments", + files={"file": ("x.png", io.BytesIO(_png_bytes()), "image/png")}, + ) + assert r.status_code == 400 + + +def test_upload_oversized_file_returns_413(client): + """KIN-090: 413 если файл превышает 10 MB.""" + big_content = b"x" * (10 * 1024 * 1024 + 1) + r = client.post( + "/api/tasks/PRJ-001/attachments", + files={"file": ("huge.png", io.BytesIO(big_content), "image/png")}, + ) + assert r.status_code == 413 + + +# --------------------------------------------------------------------------- +# List +# --------------------------------------------------------------------------- + +def test_list_attachments_empty_for_new_task(client): + """KIN-090: GET /api/tasks/{id}/attachments возвращает [] для задачи без вложений.""" + r = client.get("/api/tasks/PRJ-001/attachments") + assert r.status_code == 200 + assert r.json() == [] + + +def test_list_attachments_returns_all_uploaded(client): + """KIN-090: список содержит все загруженные вложения.""" + client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("a.png", io.BytesIO(_png_bytes()), "image/png")}) + client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("b.jpg", io.BytesIO(_png_bytes()), "image/jpeg")}) + + r = client.get("/api/tasks/PRJ-001/attachments") + assert r.status_code == 200 + filenames = {a["filename"] for a in r.json()} + assert "a.png" in filenames + assert "b.jpg" in filenames + + +def test_list_attachments_task_not_found_returns_404(client): + """KIN-090: 404 если задача не 
существует.""" + r = client.get("/api/tasks/PRJ-999/attachments") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# Delete +# --------------------------------------------------------------------------- + +def test_delete_attachment_returns_204(client): + """KIN-090: DELETE возвращает 204.""" + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("del.png", io.BytesIO(_png_bytes()), "image/png")}) + att_id = r.json()["id"] + + r = client.delete(f"/api/tasks/PRJ-001/attachments/{att_id}") + assert r.status_code == 204 + + +def test_delete_attachment_removes_from_list(client): + """KIN-090: после удаления вложение не появляется в списке.""" + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("rm.png", io.BytesIO(_png_bytes()), "image/png")}) + att_id = r.json()["id"] + client.delete(f"/api/tasks/PRJ-001/attachments/{att_id}") + + attachments = client.get("/api/tasks/PRJ-001/attachments").json() + assert not any(a["id"] == att_id for a in attachments) + + +def test_delete_attachment_removes_file_from_disk(client): + """KIN-090: удаление вложения удаляет файл с диска.""" + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("disk.png", io.BytesIO(_png_bytes()), "image/png")}) + saved_path = Path(r.json()["path"]) + att_id = r.json()["id"] + + assert saved_path.exists() + client.delete(f"/api/tasks/PRJ-001/attachments/{att_id}") + assert not saved_path.exists() + + +def test_delete_attachment_not_found_returns_404(client): + """KIN-090: 404 если вложение не существует.""" + r = client.delete("/api/tasks/PRJ-001/attachments/99999") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# Download +# --------------------------------------------------------------------------- + +def test_download_attachment_file_returns_correct_content(client): + """KIN-090: AC3 — GET /api/attachments/{id}/file 
возвращает содержимое файла.""" + content = _png_bytes() + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("get.png", io.BytesIO(content), "image/png")}) + att_id = r.json()["id"] + + r = client.get(f"/api/attachments/{att_id}/file") + assert r.status_code == 200 + assert r.content == content + + +def test_download_attachment_file_returns_correct_content_type(client): + """KIN-090: AC3 — Content-Type соответствует mime_type вложения.""" + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("ct.png", io.BytesIO(_png_bytes()), "image/png")}) + att_id = r.json()["id"] + + r = client.get(f"/api/attachments/{att_id}/file") + assert r.status_code == 200 + assert "image/png" in r.headers["content-type"] + + +def test_download_attachment_not_found_returns_404(client): + """KIN-090: 404 если вложение не существует.""" + r = client.get("/api/attachments/99999/file") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# Integration: upload → list → agent context (AC2) +# --------------------------------------------------------------------------- + +def test_integration_upload_list_agent_context(client, tmp_path): + """KIN-090: Интеграционный тест: upload → list → build_context включает вложения. + + Проверяет AC1 (путь), AC3 (доступен для скачивания), AC4 (SQLite), + и AC2 (агенты получают вложения через build_context). 
+ """ + # Step 1: Upload image + content = _png_bytes() + r = client.post("/api/tasks/PRJ-001/attachments", + files={"file": ("integration.png", io.BytesIO(content), "image/png")}) + assert r.status_code == 201 + att = r.json() + + # Step 2: AC1 — file is at correct path inside project + saved_path = Path(att["path"]) + assert saved_path.exists() + assert "PRJ-001" in str(saved_path) + assert ".kin/attachments" in str(saved_path) + + # Step 3: List confirms persistence (AC4) + r = client.get("/api/tasks/PRJ-001/attachments") + assert r.status_code == 200 + assert len(r.json()) == 1 + + # Step 4: Download works (AC3) + r = client.get(f"/api/attachments/{att['id']}/file") + assert r.status_code == 200 + assert r.content == content + + # Step 5: AC2 — agent context includes attachment path + from core.db import init_db + from core.context_builder import build_context + conn = init_db(api_module.DB_PATH) + ctx = build_context(conn, "PRJ-001", "debugger", "prj") + conn.close() + + assert "attachments" in ctx + paths = [a["path"] for a in ctx["attachments"]] + assert att["path"] in paths diff --git a/tests/test_api_chat.py b/tests/test_api_chat.py new file mode 100644 index 0000000..162c961 --- /dev/null +++ b/tests/test_api_chat.py @@ -0,0 +1,56 @@ +"""Tests for chat endpoints: GET/POST /api/projects/{project_id}/chat (KIN-UI-005).""" + +import pytest +from unittest.mock import patch, MagicMock +from fastapi.testclient import TestClient + +import web.api as api_module + + +@pytest.fixture +def client(tmp_path): + db_path = tmp_path / "test.db" + api_module.DB_PATH = db_path + from web.api import app + c = TestClient(app) + c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/p1"}) + return c + + +def test_get_chat_history_empty_for_new_project(client): + r = client.get("/api/projects/p1/chat") + assert r.status_code == 200 + assert r.json() == [] + + +def test_post_chat_task_request_creates_task_stub(client): + with patch("core.chat_intent.classify_intent", 
return_value="task_request"), \ + patch("web.api.subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock() + r = client.post("/api/projects/p1/chat", json={"content": "Добавь кнопку выхода"}) + + assert r.status_code == 200 + data = r.json() + assert data["user_message"]["role"] == "user" + assert data["assistant_message"]["message_type"] == "task_created" + assert "task_stub" in data["assistant_message"] + assert data["assistant_message"]["task_stub"]["status"] == "pending" + assert data["task"] is not None + assert mock_popen.called + + +def test_post_chat_status_query_returns_text_response(client): + with patch("core.chat_intent.classify_intent", return_value="status_query"): + r = client.post("/api/projects/p1/chat", json={"content": "что сейчас в работе?"}) + + assert r.status_code == 200 + data = r.json() + assert data["user_message"]["role"] == "user" + assert data["assistant_message"]["role"] == "assistant" + assert data["task"] is None + assert "Нет активных задач" in data["assistant_message"]["content"] + + +def test_post_chat_empty_content_returns_400(client): + r = client.post("/api/projects/p1/chat", json={"content": " "}) + assert r.status_code == 400 diff --git a/tests/test_api_phases.py b/tests/test_api_phases.py new file mode 100644 index 0000000..8c479d7 --- /dev/null +++ b/tests/test_api_phases.py @@ -0,0 +1,314 @@ +"""Tests for web/api.py — Phase endpoints (KIN-059). 
+ +Covers: + - POST /api/projects/new — создание проекта с фазами + - GET /api/projects/{id}/phases — список фаз с joined task + - POST /api/phases/{id}/approve — approve фазы + - POST /api/phases/{id}/reject — reject фазы + - POST /api/phases/{id}/revise — revise фазы + - POST /api/projects/{id}/phases/start — запуск агента для активной фазы +""" + +from unittest.mock import MagicMock, patch + +import pytest +from fastapi.testclient import TestClient + +import web.api as api_module + + +@pytest.fixture +def client(tmp_path): + """KIN-059: TestClient с изолированной временной БД.""" + db_path = tmp_path / "test.db" + api_module.DB_PATH = db_path + from web.api import app + return TestClient(app) + + +@pytest.fixture +def client_with_phases(client): + """KIN-059: клиент с уже созданным проектом + фазами.""" + r = client.post("/api/projects/new", json={ + "id": "proj1", + "name": "Test Project", + "path": "/tmp/proj1", + "description": "Описание тестового проекта", + "roles": ["business_analyst"], + }) + assert r.status_code == 200 + return client + + +# --------------------------------------------------------------------------- +# POST /api/projects/new +# --------------------------------------------------------------------------- + + +def test_post_projects_new_creates_project_and_phases(client): + """KIN-059: POST /api/projects/new создаёт проект с фазами (researcher + architect).""" + r = client.post("/api/projects/new", json={ + "id": "p1", + "name": "My Project", + "path": "/tmp/p1", + "description": "Описание", + "roles": ["tech_researcher"], + }) + assert r.status_code == 200 + data = r.json() + assert data["project"]["id"] == "p1" + # tech_researcher + architect = 2 фазы + assert len(data["phases"]) == 2 + phase_roles = [ph["role"] for ph in data["phases"]] + assert "architect" in phase_roles + assert phase_roles[-1] == "architect" + + +def test_post_projects_new_no_roles_returns_400(client): + """KIN-059: POST /api/projects/new без ролей возвращает 400.""" 
+ r = client.post("/api/projects/new", json={ + "id": "p1", + "name": "P1", + "path": "/tmp/p1", + "description": "Desc", + "roles": [], + }) + assert r.status_code == 400 + + +def test_post_projects_new_only_architect_returns_400(client): + """KIN-059: только architect в roles → 400 (architect не researcher).""" + r = client.post("/api/projects/new", json={ + "id": "p1", + "name": "P1", + "path": "/tmp/p1", + "description": "Desc", + "roles": ["architect"], + }) + assert r.status_code == 400 + + +def test_post_projects_new_duplicate_id_returns_409(client): + """KIN-059: повторное создание проекта с тем же id → 409.""" + payload = { + "id": "dup", + "name": "Dup", + "path": "/tmp/dup", + "description": "Desc", + "roles": ["marketer"], + } + client.post("/api/projects/new", json=payload) + r = client.post("/api/projects/new", json=payload) + assert r.status_code == 409 + + +def test_post_projects_new_first_phase_is_active(client): + """KIN-059: первая фаза созданного проекта сразу имеет status=active.""" + r = client.post("/api/projects/new", json={ + "id": "p1", + "name": "P1", + "path": "/tmp/p1", + "description": "Desc", + "roles": ["market_researcher", "tech_researcher"], + }) + assert r.status_code == 200 + first_phase = r.json()["phases"][0] + assert first_phase["status"] == "active" + + +# --------------------------------------------------------------------------- +# GET /api/projects/{project_id}/phases +# --------------------------------------------------------------------------- + + +def test_get_project_phases_returns_phases_with_task(client_with_phases): + """KIN-059: GET /api/projects/{id}/phases возвращает фазы с joined полем task.""" + r = client_with_phases.get("/api/projects/proj1/phases") + assert r.status_code == 200 + phases = r.json() + assert len(phases) >= 1 + # Активная первая фаза должна иметь task + active = next((ph for ph in phases if ph["status"] == "active"), None) + assert active is not None + assert active["task"] is not None + + +def 
test_get_project_phases_project_not_found_returns_404(client): + """KIN-059: GET /api/projects/missing/phases → 404.""" + r = client.get("/api/projects/missing/phases") + assert r.status_code == 404 + + +# --------------------------------------------------------------------------- +# POST /api/phases/{phase_id}/approve +# --------------------------------------------------------------------------- + + +def _get_first_active_phase_id(client, project_id: str) -> int: + """Вспомогательная: получить id первой активной фазы.""" + phases = client.get(f"/api/projects/{project_id}/phases").json() + active = next(ph for ph in phases if ph["status"] == "active") + return active["id"] + + +def test_approve_phase_returns_200_and_activates_next(client_with_phases): + """KIN-059: POST /api/phases/{id}/approve → 200, следующая фаза активируется.""" + phase_id = _get_first_active_phase_id(client_with_phases, "proj1") + r = client_with_phases.post(f"/api/phases/{phase_id}/approve", json={}) + assert r.status_code == 200 + data = r.json() + assert data["phase"]["status"] == "approved" + # Следующая фаза (architect) активирована + assert data["next_phase"] is not None + assert data["next_phase"]["status"] == "active" + + +def test_approve_phase_not_found_returns_404(client): + """KIN-059: approve несуществующей фазы → 404.""" + r = client.post("/api/phases/9999/approve", json={}) + assert r.status_code == 404 + + +def test_approve_phase_not_active_returns_400(client): + """KIN-059: approve pending-фазы → 400 (фаза не active).""" + # Создаём проект с двумя researcher-ролями + client.post("/api/projects/new", json={ + "id": "p2", + "name": "P2", + "path": "/tmp/p2", + "description": "Desc", + "roles": ["market_researcher", "tech_researcher"], + }) + phases = client.get("/api/projects/p2/phases").json() + # Вторая фаза pending + pending = next(ph for ph in phases if ph["status"] == "pending") + r = client.post(f"/api/phases/{pending['id']}/approve", json={}) + assert r.status_code == 400 
+ + +# --------------------------------------------------------------------------- +# POST /api/phases/{phase_id}/reject +# --------------------------------------------------------------------------- + + +def test_reject_phase_returns_200(client_with_phases): + """KIN-059: POST /api/phases/{id}/reject → 200, status=rejected.""" + phase_id = _get_first_active_phase_id(client_with_phases, "proj1") + r = client_with_phases.post(f"/api/phases/{phase_id}/reject", json={"reason": "Не актуально"}) + assert r.status_code == 200 + assert r.json()["status"] == "rejected" + + +def test_reject_phase_not_found_returns_404(client): + """KIN-059: reject несуществующей фазы → 404.""" + r = client.post("/api/phases/9999/reject", json={"reason": "test"}) + assert r.status_code == 404 + + +def test_reject_phase_not_active_returns_400(client): + """KIN-059: reject pending-фазы → 400.""" + client.post("/api/projects/new", json={ + "id": "p3", + "name": "P3", + "path": "/tmp/p3", + "description": "Desc", + "roles": ["legal_researcher", "ux_designer"], + }) + phases = client.get("/api/projects/p3/phases").json() + pending = next(ph for ph in phases if ph["status"] == "pending") + r = client.post(f"/api/phases/{pending['id']}/reject", json={"reason": "test"}) + assert r.status_code == 400 + + +# --------------------------------------------------------------------------- +# POST /api/phases/{phase_id}/revise +# --------------------------------------------------------------------------- + + +def test_revise_phase_returns_200_and_creates_new_task(client_with_phases): + """KIN-059: POST /api/phases/{id}/revise → 200, создаётся новая задача.""" + phase_id = _get_first_active_phase_id(client_with_phases, "proj1") + r = client_with_phases.post( + f"/api/phases/{phase_id}/revise", + json={"comment": "Добавь детали по монетизации"}, + ) + assert r.status_code == 200 + data = r.json() + assert data["phase"]["status"] == "revising" + assert data["new_task"] is not None + assert 
data["new_task"]["brief"]["revise_comment"] == "Добавь детали по монетизации" + + +def test_revise_phase_empty_comment_returns_400(client_with_phases): + """KIN-059: revise с пустым комментарием → 400.""" + phase_id = _get_first_active_phase_id(client_with_phases, "proj1") + r = client_with_phases.post(f"/api/phases/{phase_id}/revise", json={"comment": " "}) + assert r.status_code == 400 + + +def test_revise_phase_not_found_returns_404(client): + """KIN-059: revise несуществующей фазы → 404.""" + r = client.post("/api/phases/9999/revise", json={"comment": "test"}) + assert r.status_code == 404 + + +def test_revise_phase_not_active_returns_400(client): + """KIN-059: revise pending-фазы → 400.""" + client.post("/api/projects/new", json={ + "id": "p4", + "name": "P4", + "path": "/tmp/p4", + "description": "Desc", + "roles": ["marketer", "ux_designer"], + }) + phases = client.get("/api/projects/p4/phases").json() + pending = next(ph for ph in phases if ph["status"] == "pending") + r = client.post(f"/api/phases/{pending['id']}/revise", json={"comment": "test"}) + assert r.status_code == 400 + + +# --------------------------------------------------------------------------- +# POST /api/projects/{project_id}/phases/start +# --------------------------------------------------------------------------- + + +def test_start_phase_returns_202_and_starts_agent(client_with_phases): + """KIN-059: POST /api/projects/{id}/phases/start → 202, агент запускается в фоне.""" + with patch("subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 12345 + mock_popen.return_value = mock_proc + + r = client_with_phases.post("/api/projects/proj1/phases/start") + + assert r.status_code == 202 + data = r.json() + assert data["status"] == "started" + assert "phase_id" in data + assert "task_id" in data + mock_popen.assert_called_once() + + +def test_start_phase_task_set_to_in_progress(client_with_phases): + """KIN-059: start устанавливает task.status=in_progress перед запуском 
агента.""" + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=1) + r = client_with_phases.post("/api/projects/proj1/phases/start") + task_id = r.json()["task_id"] + + task = client_with_phases.get(f"/api/tasks/{task_id}").json() + assert task["status"] == "in_progress" + + +def test_start_phase_no_active_phase_returns_404(client): + """KIN-059: start без активной/revising фазы → 404.""" + # Проект без фаз (обычный проект через /api/projects) + client.post("/api/projects", json={"id": "plain", "name": "Plain", "path": "/tmp/plain"}) + r = client.post("/api/projects/plain/phases/start") + assert r.status_code == 404 + + +def test_start_phase_project_not_found_returns_404(client): + """KIN-059: start для несуществующего проекта → 404.""" + r = client.post("/api/projects/missing/phases/start") + assert r.status_code == 404 diff --git a/tests/test_arch_002.py b/tests/test_arch_002.py new file mode 100644 index 0000000..6ada9af --- /dev/null +++ b/tests/test_arch_002.py @@ -0,0 +1,121 @@ +"""Regression tests for KIN-ARCH-002. + +Проблема: функция create_project_with_phases имела нестабильную сигнатуру — +параметр path с дефолтом на позиции 4, после чего шли обязательные параметры +(description, selected_roles) — это SyntaxError в любой версии Python, которая +проявлялась при перекомпиляции модуля (инвалидации .pyc-кеша). + +Фикс: параметр path переносится после обязательных ИЛИ изолируется через * +(keyword-only) — текущий код использует * для description/selected_roles. + +Тесты покрывают: + 1. Вызов с path как позиционным аргументом (текущая конвенция в тестах) + 2. Вызов с path=... как keyword-аргументом (безопасная конвенция) + 3. Вызов с path=None (ожидаемый IntegrityError для non-operations проекта) + 4. Нет SyntaxError при импорте core.phases (regression guard) + 5. 
Стабильность числа тестов: полный suite запускается без collection errors +""" + +import pytest +from core.db import init_db +from core import models +from core.phases import create_project_with_phases + + +@pytest.fixture +def conn(): + c = init_db(db_path=":memory:") + yield c + c.close() + + +# --------------------------------------------------------------------------- +# KIN-ARCH-002 — regression: signature stability of create_project_with_phases +# --------------------------------------------------------------------------- + + +def test_arch_002_import_core_phases_no_syntax_error(): + """KIN-ARCH-002: импорт core.phases не вызывает SyntaxError.""" + import core.phases # noqa: F401 — если упадёт SyntaxError, тест падает + + +def test_arch_002_path_as_positional_arg(conn): + """KIN-ARCH-002: path передаётся как позиционный аргумент (4-я позиция). + + Текущая конвенция во всех тестах и в web/api.py. + Регрессионная защита: изменение сигнатуры не должно сломать этот вызов. + """ + result = create_project_with_phases( + conn, "arch002a", "Project A", "/some/path", + description="Описание A", selected_roles=["business_analyst"], + ) + assert result["project"]["id"] == "arch002a" + assert len(result["phases"]) == 2 # business_analyst + architect + + +def test_arch_002_path_as_keyword_arg(conn): + """KIN-ARCH-002: path передаётся как keyword-аргумент. + + Рекомендуемая конвенция по итогам debugger-расследования. + Гарантирует, что будущий рефакторинг сигнатуры не сломает код. 
+ """ + result = create_project_with_phases( + conn, "arch002b", "Project B", + description="Описание B", + selected_roles=["tech_researcher"], + path="/keyword/path", + ) + assert result["project"]["id"] == "arch002b" + assert result["project"]["path"] == "/keyword/path" + + + +def test_arch_002_path_none_without_operations_raises(conn): + """KIN-ARCH-002: path=None для non-operations проекта → IntegrityError из БД (CHECK constraint).""" + import sqlite3 + with pytest.raises(sqlite3.IntegrityError, match="CHECK constraint"): + create_project_with_phases( + conn, "arch002fail", "Fail", + description="D", + selected_roles=["marketer"], + path=None, + ) + + +def test_arch_002_phases_count_is_deterministic(conn): + """KIN-ARCH-002: при каждом вызове создаётся ровно N+1 фаз (N researchers + architect).""" + for idx, (roles, expected_count) in enumerate([ + (["business_analyst"], 2), + (["business_analyst", "tech_researcher"], 3), + (["business_analyst", "market_researcher", "legal_researcher"], 4), + ]): + project_id = f"arch002_det_{idx}" + result = create_project_with_phases( + conn, project_id, f"Project {len(roles)}", + description="Det test", + selected_roles=roles, + path=f"/tmp/det/{idx}", + ) + assert len(result["phases"]) == expected_count, ( + f"roles={roles}: ожидали {expected_count} фаз, " + f"получили {len(result['phases'])}" + ) + + +def test_arch_002_first_phase_active_regardless_of_call_convention(conn): + """KIN-ARCH-002: первая фаза всегда active независимо от способа передачи path.""" + # Positional convention + r1 = create_project_with_phases( + conn, "p_pos", "P pos", "/pos", + description="D", selected_roles=["business_analyst"], + ) + assert r1["phases"][0]["status"] == "active" + assert r1["phases"][0]["task_id"] is not None + + # Keyword convention + r2 = create_project_with_phases( + conn, "p_kw", "P kw", + description="D", selected_roles=["business_analyst"], path="/kw", + ) + assert r2["phases"][0]["status"] == "active" + assert 
r2["phases"][0]["task_id"] is not None diff --git a/tests/test_auto_mode.py b/tests/test_auto_mode.py index e71c1e7..2799d6a 100644 --- a/tests/test_auto_mode.py +++ b/tests/test_auto_mode.py @@ -1,7 +1,8 @@ """ -Tests for KIN-012 auto mode features: +Tests for KIN-012/KIN-063 auto mode features: - TestAutoApprove: pipeline auto-approves (status → done) без ручного review + (KIN-063: auto_complete только если последний шаг — tester или reviewer) - TestAutoRerunOnPermissionDenied: runner делает retry при permission error, останавливается после одного retry (лимит = 1) - TestAutoFollowup: generate_followups вызывается сразу, без ожидания @@ -75,30 +76,30 @@ class TestAutoApprove: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_sets_status_done(self, mock_run, mock_hooks, mock_followup, conn): - """Auto-режим: статус задачи становится 'done', а не 'review'.""" + """Auto-complete режим: статус становится 'done', если последний шаг — tester.""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find bug"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find bug"}, {"role": "tester", "brief": "verify fix"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True task = models.get_task(conn, "VDOL-001") - assert task["status"] == "done", "Auto-mode должен auto-approve: status=done" + assert task["status"] == "done", "Auto-complete должен auto-approve: status=done" @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_fires_task_auto_approved_hook(self, mock_run, mock_hooks, mock_followup, conn): - """В auto-режиме срабатывает хук task_auto_approved.""" + """В 
auto_complete-режиме срабатывает хук task_auto_approved (если последний шаг — tester).""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find bug"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find bug"}, {"role": "tester", "brief": "verify"}] run_pipeline(conn, "VDOL-001", steps) events = _get_hook_events(mock_hooks) @@ -140,20 +141,20 @@ class TestAutoApprove: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_task_level_auto_overrides_project_review(self, mock_run, mock_hooks, mock_followup, conn): - """Если у задачи execution_mode=auto, pipeline auto-approve, даже если проект в review.""" + """Если у задачи execution_mode=auto_complete, pipeline auto-approve, даже если проект в review.""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - # Проект в review, но задача — auto - models.update_task(conn, "VDOL-001", execution_mode="auto") + # Проект в review, но задача — auto_complete + models.update_task(conn, "VDOL-001", execution_mode="auto_complete") - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "reviewer", "brief": "approve"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True task = models.get_task(conn, "VDOL-001") - assert task["status"] == "done", "Task-level auto должен override project review" + assert task["status"] == "done", "Task-level auto_complete должен override project review" @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @@ -164,11 +165,11 @@ class TestAutoApprove: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], 
"pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) - assert result.get("mode") == "auto" + assert result.get("mode") == "auto_complete" # --------------------------------------------------------------------------- @@ -178,10 +179,13 @@ class TestAutoApprove: class TestAutoRerunOnPermissionDenied: """Runner повторяет шаг при permission issues, останавливается по лимиту (1 retry).""" + @patch("agents.runner._get_changed_files", return_value=[]) + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_auto_mode_retries_on_permission_error(self, mock_run, mock_hooks, mock_followup, conn): + def test_auto_mode_retries_on_permission_error(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, mock_changed_files, conn): """Auto-режим: при permission denied runner делает 1 retry с allow_write=True.""" mock_run.side_effect = [ _mock_permission_denied(), # 1-й вызов: permission error @@ -189,8 +193,9 @@ class TestAutoRerunOnPermissionDenied: ] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix file"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -209,7 +214,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", 
execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] run_pipeline(conn, "VDOL-001", steps) @@ -229,7 +234,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] run_pipeline(conn, "VDOL-001", steps) @@ -248,7 +253,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -257,10 +262,13 @@ class TestAutoRerunOnPermissionDenied: task = models.get_task(conn, "VDOL-001") assert task["status"] == "blocked" + @patch("agents.runner._get_changed_files", return_value=[]) + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_subsequent_steps_use_allow_write_after_retry(self, mock_run, mock_hooks, mock_followup, conn): + def test_subsequent_steps_use_allow_write_after_retry(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, mock_changed_files, conn): """После успешного retry все следующие шаги тоже используют allow_write.""" mock_run.side_effect = [ _mock_permission_denied(), # Шаг 1: permission error @@ -269,8 +277,9 @@ class TestAutoRerunOnPermissionDenied: ] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} - models.update_project(conn, 
"vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [ {"role": "debugger", "brief": "fix"}, {"role": "tester", "brief": "test"}, @@ -293,7 +302,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -330,13 +339,13 @@ class TestAutoFollowup: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_followup_triggered_immediately(self, mock_run, mock_hooks, mock_followup, conn): - """В auto-режиме generate_followups вызывается сразу после pipeline.""" + """В auto_complete-режиме generate_followups вызывается сразу после pipeline (последний шаг — tester).""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -357,8 +366,8 @@ class TestAutoFollowup: mock_followup.return_value = {"created": [], "pending_actions": pending} mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] run_pipeline(conn, "VDOL-001", steps) mock_resolve.assert_called_once_with(conn, "VDOL-001", 
pending) @@ -392,10 +401,10 @@ class TestAutoFollowup: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"}) - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -412,8 +421,8 @@ class TestAutoFollowup: mock_hooks.return_value = [] mock_followup.side_effect = Exception("followup PM crashed") - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True # Pipeline succeeded, followup failure absorbed @@ -431,8 +440,8 @@ class TestAutoFollowup: mock_followup.return_value = {"created": [], "pending_actions": []} mock_resolve.return_value = [] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] run_pipeline(conn, "VDOL-001", steps) mock_resolve.assert_not_called() diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py index 20dc5ea..ee61ceb 100644 --- a/tests/test_bootstrap.py +++ b/tests/test_bootstrap.py @@ -114,6 +114,26 @@ def test_detect_modules_empty(tmp_path): assert detect_modules(tmp_path) == [] +def test_detect_modules_deduplication_by_name(tmp_path): + """KIN-081: detect_modules дедуплицирует по имени (не по имени+путь). 
+ + Если два разных scan_dir дают одноимённые модули (например, frontend/src/components + и backend/src/components), результат содержит только первый. + Это соответствует UNIQUE constraint (project_id, name) в таблице modules. + """ + fe_comp = tmp_path / "frontend" / "src" / "components" + fe_comp.mkdir(parents=True) + (fe_comp / "App.vue").write_text("") + + be_comp = tmp_path / "backend" / "src" / "components" + be_comp.mkdir(parents=True) + (be_comp / "Service.ts").write_text("export class Service {}") + + modules = detect_modules(tmp_path) + names = [m["name"] for m in modules] + assert names.count("components") == 1 + + def test_detect_modules_backend_pg(tmp_path): """Test detection in backend-pg/src/ pattern (like vdolipoperek).""" src = tmp_path / "backend-pg" / "src" / "services" diff --git a/tests/test_context_builder.py b/tests/test_context_builder.py index 64bf732..b659082 100644 --- a/tests/test_context_builder.py +++ b/tests/test_context_builder.py @@ -161,3 +161,506 @@ class TestLanguageInProject: def test_context_carries_language(self, conn): ctx = build_context(conn, "VDOL-001", "pm", "vdol") assert ctx["project"]["language"] == "ru" + + +# --------------------------------------------------------------------------- +# KIN-045: Revise context — revise_comment + last agent output injection +# --------------------------------------------------------------------------- + +class TestReviseContext: + """build_context и format_prompt корректно инжектируют контекст ревизии.""" + + def test_build_context_includes_revise_comment_in_task(self, conn): + """Если у задачи есть revise_comment, он попадает в ctx['task'].""" + conn.execute( + "UPDATE tasks SET revise_comment=? 
WHERE id='VDOL-001'", + ("Доисследуй edge case с пустым массивом",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert ctx["task"]["revise_comment"] == "Доисследуй edge case с пустым массивом" + + def test_build_context_fetches_last_agent_output_when_revise_comment_set(self, conn): + """При revise_comment build_context достаёт last_agent_output из agent_logs.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Реализован endpoint POST /api/items", + success=True, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Добавь валидацию входных данных",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert ctx.get("last_agent_output") == "Реализован endpoint POST /api/items" + + def test_build_context_no_last_agent_output_when_no_successful_logs(self, conn): + """revise_comment есть, но нет успешных логов — last_agent_output отсутствует.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Permission denied", + success=False, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? 
WHERE id='VDOL-001'", + ("Повтори без ошибок",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert "last_agent_output" not in ctx + + def test_build_context_no_revise_fields_when_no_revise_comment(self, conn): + """Обычная задача без revise_comment не получает last_agent_output в контексте.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Всё готово", + success=True, + ) + # revise_comment не устанавливаем + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert "last_agent_output" not in ctx + assert ctx["task"].get("revise_comment") is None + + def test_format_prompt_includes_director_revision_request(self, conn): + """format_prompt содержит секцию '## Director's revision request:' при revise_comment.""" + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Обработай случай пустого списка",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Director's revision request:" in prompt + assert "Обработай случай пустого списка" in prompt + + def test_format_prompt_includes_previous_output_before_revision(self, conn): + """format_prompt содержит '## Your previous output (before revision):' при last_agent_output.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Сделал миграцию БД", + success=True, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? 
WHERE id='VDOL-001'", + ("Ещё добавь индекс",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Your previous output (before revision):" in prompt + assert "Сделал миграцию БД" in prompt + + def test_format_prompt_no_revision_sections_when_no_revise_comment(self, conn): + """Без revise_comment в prompt нет секций ревизии.""" + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Director's revision request:" not in prompt + assert "## Your previous output (before revision):" not in prompt + + +# --------------------------------------------------------------------------- +# KIN-071: project_type and SSH context +# --------------------------------------------------------------------------- + +class TestOperationsProject: + """KIN-071: operations project_type propagates to context and prompt.""" + + @pytest.fixture + def ops_conn(self): + c = init_db(":memory:") + models.create_project( + c, "srv", "My Server", "", + project_type="operations", + ssh_host="10.0.0.1", + ssh_user="root", + ssh_key_path="~/.ssh/id_rsa", + ssh_proxy_jump="jumpt", + ) + models.create_task(c, "SRV-001", "srv", "Scan server") + yield c + c.close() + + def test_slim_project_includes_project_type(self, ops_conn): + """KIN-071: _slim_project включает project_type.""" + ctx = build_context(ops_conn, "SRV-001", "sysadmin", "srv") + assert ctx["project"]["project_type"] == "operations" + + def test_slim_project_includes_ssh_fields_for_operations(self, ops_conn): + """KIN-071: _slim_project включает ssh_* поля для operations-проектов.""" + ctx = build_context(ops_conn, "SRV-001", "sysadmin", "srv") + proj = ctx["project"] + assert proj["ssh_host"] == "10.0.0.1" + assert proj["ssh_user"] == "root" + assert proj["ssh_key_path"] == "~/.ssh/id_rsa" + assert proj["ssh_proxy_jump"] == "jumpt" + + def 
test_slim_project_no_ssh_fields_for_development(self): + """KIN-071: development-проект не получает ssh_* в slim.""" + c = init_db(":memory:") + models.create_project(c, "dev", "Dev", "/path") + models.create_task(c, "DEV-001", "dev", "A task") + ctx = build_context(c, "DEV-001", "backend_dev", "dev") + assert "ssh_host" not in ctx["project"] + c.close() + + def test_sysadmin_context_gets_decisions_and_modules(self, ops_conn): + """KIN-071: sysadmin роль получает все decisions и modules.""" + models.add_module(ops_conn, "srv", "nginx", "service", "/etc/nginx") + models.add_decision(ops_conn, "srv", "gotcha", "Port 80 in use", "conflict") + ctx = build_context(ops_conn, "SRV-001", "sysadmin", "srv") + assert "decisions" in ctx + assert "modules" in ctx + assert len(ctx["modules"]) == 1 + + def test_format_prompt_includes_ssh_connection_section(self, ops_conn): + """KIN-071: format_prompt добавляет '## SSH Connection' для operations.""" + ctx = build_context(ops_conn, "SRV-001", "sysadmin", "srv") + prompt = format_prompt(ctx, "sysadmin", "You are sysadmin.") + assert "## SSH Connection" in prompt + assert "10.0.0.1" in prompt + assert "root" in prompt + assert "jumpt" in prompt + + def test_format_prompt_no_ssh_section_for_development(self): + """KIN-071: development-проект не получает SSH-секцию в prompt.""" + c = init_db(":memory:") + models.create_project(c, "dev", "Dev", "/path") + models.create_task(c, "DEV-001", "dev", "A task") + ctx = build_context(c, "DEV-001", "backend_dev", "dev") + prompt = format_prompt(ctx, "backend_dev", "You are a dev.") + assert "## SSH Connection" not in prompt + c.close() + + def test_format_prompt_includes_project_type(self, ops_conn): + """KIN-071: format_prompt включает Project type в секцию проекта.""" + ctx = build_context(ops_conn, "SRV-001", "sysadmin", "srv") + prompt = format_prompt(ctx, "sysadmin", "You are sysadmin.") + assert "Project type: operations" in prompt + + +# 
# ---------------------------------------------------------------------------
# KIN-071: PM routing — operations project routes PM to infra_* pipelines
# ---------------------------------------------------------------------------

class TestPMRoutingOperations:
    """PM context for an operations project must expose infra routes that
    never include architect or frontend_dev."""

    @pytest.fixture
    def ops_conn(self):
        # Minimal operations project (SSH host/user only) with one task.
        c = init_db(":memory:")
        models.create_project(
            c, "srv", "My Server", "",
            project_type="operations",
            ssh_host="10.0.0.1",
            ssh_user="root",
        )
        models.create_task(c, "SRV-001", "srv", "Scan server")
        yield c
        c.close()

    def test_pm_context_has_operations_project_type(self, ops_conn):
        """PM sees project_type=operations in the project context."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        assert ctx["project"]["project_type"] == "operations"

    def test_pm_context_has_infra_scan_route(self, ops_conn):
        """PM context carries the infra_scan route from specialists.yaml."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        assert "infra_scan" in ctx["routes"]

    def test_pm_context_has_infra_debug_route(self, ops_conn):
        """PM context carries the infra_debug route from specialists.yaml."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        assert "infra_debug" in ctx["routes"]

    def test_infra_scan_route_uses_sysadmin(self, ops_conn):
        """The infra_scan route includes sysadmin among its steps."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        steps = ctx["routes"]["infra_scan"]["steps"]
        assert "sysadmin" in steps

    def test_infra_scan_route_excludes_architect(self, ops_conn):
        """The infra_scan route never assigns architect."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        steps = ctx["routes"]["infra_scan"]["steps"]
        assert "architect" not in steps

    def test_infra_scan_route_excludes_frontend_dev(self, ops_conn):
        """The infra_scan route never assigns frontend_dev."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        steps = ctx["routes"]["infra_scan"]["steps"]
        assert "frontend_dev" not in steps

    def test_format_prompt_pm_operations_project_type_label(self, ops_conn):
        """format_prompt for PM on an operations project contains 'Project type: operations'."""
        ctx = build_context(ops_conn, "SRV-001", "pm", "srv")
        prompt = format_prompt(ctx, "pm", "You are PM.")
        assert "Project type: operations" in prompt


# ---------------------------------------------------------------------------
# KIN-090: Attachments — context builder includes attachment paths
# ---------------------------------------------------------------------------

class TestAttachmentsInContext:
    """KIN-090: AC2 — agents receive attachment paths in the task context."""

    @pytest.fixture
    def conn_with_attachments(self):
        # Project with two registered image attachments on one task.
        c = init_db(":memory:")
        models.create_project(c, "prj", "Project", "/tmp/prj")
        models.create_task(c, "PRJ-001", "prj", "Fix bug")
        models.create_attachment(
            c, "PRJ-001", "screenshot.png",
            "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png",
            "image/png", 1024,
        )
        models.create_attachment(
            c, "PRJ-001", "mockup.jpg",
            "/tmp/prj/.kin/attachments/PRJ-001/mockup.jpg",
            "image/jpeg", 2048,
        )
        yield c
        c.close()

    def test_build_context_includes_attachments(self, conn_with_attachments):
        """KIN-090: AC2 — build_context exposes attachments to every role."""
        ctx = build_context(conn_with_attachments, "PRJ-001", "debugger", "prj")
        assert "attachments" in ctx
        assert len(ctx["attachments"]) == 2

    def test_build_context_attachments_have_filename_and_path(self, conn_with_attachments):
        """KIN-090: context attachments carry filename and path."""
        ctx = build_context(conn_with_attachments, "PRJ-001", "debugger", "prj")
        filenames = {a["filename"] for a in ctx["attachments"]}
        paths = {a["path"] for a in ctx["attachments"]}
        assert "screenshot.png" in filenames
        assert "mockup.jpg" in filenames
assert "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png" in paths + + def test_build_context_attachments_key_always_present(self, conn): + """KIN-094 #213: ключ 'attachments' всегда присутствует в контексте (пустой список если нет вложений).""" + # conn fixture has no attachments + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + assert "attachments" in ctx + assert ctx["attachments"] == [] + + def test_all_roles_get_attachments(self, conn_with_attachments): + """KIN-090: AC2 — все роли (debugger, pm, tester, reviewer) получают вложения.""" + for role in ("debugger", "pm", "tester", "reviewer", "backend_dev", "frontend_dev"): + ctx = build_context(conn_with_attachments, "PRJ-001", role, "prj") + assert "attachments" in ctx, f"Role '{role}' did not receive attachments" + + def test_format_prompt_includes_attachments_section(self, conn_with_attachments): + """KIN-090: format_prompt включает секцию '## Attachments' с именами и путями.""" + ctx = build_context(conn_with_attachments, "PRJ-001", "debugger", "prj") + prompt = format_prompt(ctx, "debugger", "You are a debugger.") + assert "## Attachments" in prompt + assert "screenshot.png" in prompt + assert "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png" in prompt + + def test_format_prompt_no_attachments_section_when_none(self, conn): + """KIN-090: format_prompt не добавляет секцию вложений, если их нет.""" + ctx = build_context(conn, "VDOL-001", "debugger", "vdol") + prompt = format_prompt(ctx, "debugger", "Debug this.") + assert "## Attachments" not in prompt + + +# --------------------------------------------------------------------------- +# KIN-094: Attachments — ctx["attachments"] always present + inline text content +# --------------------------------------------------------------------------- + +class TestAttachmentsKIN094: + """KIN-094: AC3 — PM и другие агенты всегда получают ключ attachments в контексте; + текстовые файлы <= 32 KB вставляются inline в промпт.""" + + @pytest.fixture + def 
conn_no_attachments(self): + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", "/tmp/prj") + models.create_task(c, "PRJ-001", "prj", "Task") + yield c + c.close() + + @pytest.fixture + def conn_text_attachment(self, tmp_path): + """Проект с текстовым вложением <= 32 KB на диске.""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + txt_file = tmp_path / "spec.txt" + txt_file.write_text("Привет, это спека задачи", encoding="utf-8") + models.create_attachment( + c, "PRJ-001", "spec.txt", str(txt_file), "text/plain", txt_file.stat().st_size, + ) + yield c + c.close() + + @pytest.fixture + def conn_md_attachment(self, tmp_path): + """Проект с .md вложением (text/markdown или определяется по расширению).""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + md_file = tmp_path / "README.md" + md_file.write_text("# Title\n\nContent of readme", encoding="utf-8") + models.create_attachment( + c, "PRJ-001", "README.md", str(md_file), "text/markdown", md_file.stat().st_size, + ) + yield c + c.close() + + @pytest.fixture + def conn_json_attachment(self, tmp_path): + """Проект с JSON-вложением (application/json).""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + json_file = tmp_path / "config.json" + json_file.write_text('{"key": "value"}', encoding="utf-8") + models.create_attachment( + c, "PRJ-001", "config.json", str(json_file), "application/json", json_file.stat().st_size, + ) + yield c + c.close() + + @pytest.fixture + def conn_large_text_attachment(self, tmp_path): + """Проект с текстовым вложением > 32 KB (не должно инлайниться).""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + big_file = tmp_path / "big.txt" + 
big_file.write_text("x" * (32 * 1024 + 1), encoding="utf-8") + models.create_attachment( + c, "PRJ-001", "big.txt", str(big_file), "text/plain", big_file.stat().st_size, + ) + yield c + c.close() + + @pytest.fixture + def conn_image_attachment(self, tmp_path): + """Проект с бинарным PNG-вложением (не должно инлайниться).""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + png_file = tmp_path / "screen.png" + png_file.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 64) + models.create_attachment( + c, "PRJ-001", "screen.png", str(png_file), "image/png", png_file.stat().st_size, + ) + yield c + c.close() + + # ------------------------------------------------------------------ + # ctx["attachments"] always present + # ------------------------------------------------------------------ + + def test_pm_context_attachments_empty_list_when_no_attachments(self, conn_no_attachments): + """KIN-094: PM получает пустой список attachments, а не отсутствующий ключ.""" + ctx = build_context(conn_no_attachments, "PRJ-001", "pm", "prj") + assert "attachments" in ctx + assert ctx["attachments"] == [] + + def test_all_roles_attachments_key_present_when_empty(self, conn_no_attachments): + """KIN-094: все роли получают ключ attachments (пустой список) даже без вложений.""" + for role in ("pm", "debugger", "tester", "reviewer", "backend_dev", "frontend_dev", "architect"): + ctx = build_context(conn_no_attachments, "PRJ-001", role, "prj") + assert "attachments" in ctx, f"Role '{role}' missing 'attachments' key" + assert isinstance(ctx["attachments"], list), f"Role '{role}': attachments is not a list" + + # ------------------------------------------------------------------ + # Inline content for small text files + # ------------------------------------------------------------------ + + def test_format_prompt_inlines_small_text_file_content(self, conn_text_attachment): + """KIN-094: содержимое текстового файла 
<= 32 KB вставляется inline в промпт.""" + ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "Привет, это спека задачи" in prompt + + def test_format_prompt_inlines_text_file_in_code_block(self, conn_text_attachment): + """KIN-094: inline-контент обёрнут в блок кода (``` ... ```).""" + ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "```" in prompt + + def test_format_prompt_inlines_md_file_by_extension(self, conn_md_attachment): + """KIN-094: .md файл определяется по расширению и вставляется inline.""" + ctx = build_context(conn_md_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "# Title" in prompt + assert "Content of readme" in prompt + + def test_format_prompt_inlines_json_file_by_mime(self, conn_json_attachment): + """KIN-094: application/json файл вставляется inline по MIME-типу.""" + ctx = build_context(conn_json_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert '"key": "value"' in prompt + + # ------------------------------------------------------------------ + # NOT inlined: binary and large files + # ------------------------------------------------------------------ + + def test_format_prompt_does_not_inline_image_file(self, conn_image_attachment): + """KIN-094: бинарный PNG файл НЕ вставляется inline.""" + ctx = build_context(conn_image_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + # File is listed in ## Attachments section but no ``` block with binary content + assert "screen.png" in prompt # listed + assert "image/png" in prompt + # Should not contain raw binary or ``` code block for the PNG + # We verify the file content (PNG header) is NOT inlined + assert "\x89PNG" not in prompt + + def test_format_prompt_does_not_inline_large_text_file(self, 
conn_large_text_attachment): + """KIN-094: текстовый файл > 32 KB НЕ вставляется inline.""" + ctx = build_context(conn_large_text_attachment, "PRJ-001", "pm", "prj") + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "big.txt" in prompt # listed + # Content should NOT be inlined (32KB+1 of 'x' chars) + assert "x" * 100 not in prompt + + # ------------------------------------------------------------------ + # Resilience: missing file on disk + # ------------------------------------------------------------------ + + def test_format_prompt_handles_missing_file_gracefully(self, tmp_path): + """KIN-094: если файл отсутствует на диске, format_prompt не падает.""" + c = init_db(":memory:") + models.create_project(c, "prj", "Prj", str(tmp_path)) + models.create_task(c, "PRJ-001", "prj", "Task") + # Register attachment pointing to non-existent file + models.create_attachment( + c, "PRJ-001", "missing.txt", + str(tmp_path / "missing.txt"), + "text/plain", 100, + ) + ctx = build_context(c, "PRJ-001", "pm", "prj") + # Should not raise — exception is caught silently + prompt = format_prompt(ctx, "pm", "You are PM.") + assert "missing.txt" in prompt # still listed + c.close() + + # ------------------------------------------------------------------ + # PM pipeline: attachments available in brief context + # ------------------------------------------------------------------ + + def test_pm_context_includes_attachment_paths_for_pipeline(self, conn_text_attachment): + """KIN-094: PM-агент получает пути к вложениям в контексте для старта pipeline.""" + ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj") + assert len(ctx["attachments"]) == 1 + att = ctx["attachments"][0] + assert att["filename"] == "spec.txt" + assert att["mime_type"] == "text/plain" + assert "path" in att diff --git a/tests/test_db.py b/tests/test_db.py new file mode 100644 index 0000000..6514bbf --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,285 @@ +"""Tests for core/db.py — schema and 
migration (KIN-071, KIN-073).""" + +import sqlite3 +import pytest +from core.db import init_db, _migrate + + +@pytest.fixture +def conn(): + c = init_db(db_path=":memory:") + yield c + c.close() + + +def _cols(conn, table: str) -> set[str]: + """Return set of column names for a table.""" + return {row["name"] for row in conn.execute(f"PRAGMA table_info({table})").fetchall()} + + +# --------------------------------------------------------------------------- +# Schema: новые колонки KIN-071 присутствуют при свежей инициализации +# --------------------------------------------------------------------------- + +class TestProjectsSchemaKin071: + """PRAGMA table_info(projects) должен содержать новые KIN-071 колонки.""" + + def test_schema_has_project_type_column(self, conn): + assert "project_type" in _cols(conn, "projects") + + def test_schema_has_ssh_host_column(self, conn): + assert "ssh_host" in _cols(conn, "projects") + + def test_schema_has_ssh_user_column(self, conn): + assert "ssh_user" in _cols(conn, "projects") + + def test_schema_has_ssh_key_path_column(self, conn): + assert "ssh_key_path" in _cols(conn, "projects") + + def test_schema_has_ssh_proxy_jump_column(self, conn): + assert "ssh_proxy_jump" in _cols(conn, "projects") + + def test_schema_has_description_column(self, conn): + assert "description" in _cols(conn, "projects") + + def test_project_type_defaults_to_development(self, conn): + """INSERT без project_type → значение по умолчанию 'development'.""" + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('t1', 'T', '/t')" + ) + conn.commit() + row = conn.execute( + "SELECT project_type FROM projects WHERE id='t1'" + ).fetchone() + assert row["project_type"] == "development" + + def test_ssh_fields_default_to_null(self, conn): + """SSH-поля по умолчанию NULL.""" + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('t2', 'T', '/t')" + ) + conn.commit() + row = conn.execute( + "SELECT ssh_host, ssh_user, ssh_key_path, 
ssh_proxy_jump FROM projects WHERE id='t2'" + ).fetchone() + assert row["ssh_host"] is None + assert row["ssh_user"] is None + assert row["ssh_key_path"] is None + assert row["ssh_proxy_jump"] is None + + +# --------------------------------------------------------------------------- +# Migration: _migrate добавляет KIN-071 колонки в старую схему (без них) +# --------------------------------------------------------------------------- + +def _old_schema_conn() -> sqlite3.Connection: + """Создаёт соединение с минимальной 'старой' схемой без KIN-071 колонок.""" + conn = sqlite3.connect(":memory:") + conn.row_factory = sqlite3.Row + conn.executescript(""" + CREATE TABLE projects ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + path TEXT NOT NULL, + status TEXT DEFAULT 'active', + language TEXT DEFAULT 'ru', + execution_mode TEXT NOT NULL DEFAULT 'review' + ); + CREATE TABLE tasks ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', + execution_mode TEXT + ); + """) + conn.commit() + return conn + + +def test_migrate_adds_project_type_to_old_schema(): + """_migrate добавляет project_type в старую схему без этой колонки.""" + conn = _old_schema_conn() + _migrate(conn) + assert "project_type" in _cols(conn, "projects") + conn.close() + + +def test_migrate_adds_ssh_host_to_old_schema(): + """_migrate добавляет ssh_host в старую схему.""" + conn = _old_schema_conn() + _migrate(conn) + assert "ssh_host" in _cols(conn, "projects") + conn.close() + + +def test_migrate_adds_all_ssh_columns_to_old_schema(): + """_migrate добавляет все SSH-колонки разом в старую схему.""" + conn = _old_schema_conn() + _migrate(conn) + cols = _cols(conn, "projects") + assert {"ssh_host", "ssh_user", "ssh_key_path", "ssh_proxy_jump", "description"}.issubset(cols) + conn.close() + + +def test_migrate_is_idempotent(): + """Повторный вызов _migrate не ломает схему.""" + conn = init_db(":memory:") + before = _cols(conn, "projects") + _migrate(conn) 
+ after = _cols(conn, "projects") + assert before == after + conn.close() + + +# --------------------------------------------------------------------------- +# Migration KIN-UI-002: рекреация таблицы на минимальной схеме не падает +# --------------------------------------------------------------------------- + +def test_migrate_recreates_table_without_operationalerror(): + """_migrate не бросает OperationalError при рекреации projects на минимальной схеме. + + Регрессионный тест KIN-UI-002: INSERT SELECT в блоке KIN-ARCH-003 ранее + падал на отсутствующих колонках (tech_stack, priority, pm_prompt и др.). + """ + conn = _old_schema_conn() # path NOT NULL — триггер рекреации + try: + _migrate(conn) + except Exception as exc: + pytest.fail(f"_migrate raised {type(exc).__name__}: {exc}") + conn.close() + + +def test_migrate_path_becomes_nullable_on_old_schema(): + """После миграции старой схемы (path NOT NULL) колонка path становится nullable.""" + conn = _old_schema_conn() + _migrate(conn) + path_col = next( + r for r in conn.execute("PRAGMA table_info(projects)").fetchall() + if r[1] == "path" + ) + assert path_col[3] == 0, "path должна быть nullable после миграции KIN-ARCH-003" + conn.close() + + +def test_migrate_preserves_existing_rows_on_recreation(): + """Рекреация таблицы сохраняет существующие строки.""" + conn = _old_schema_conn() + conn.execute( + "INSERT INTO projects (id, name, path, status) VALUES ('p1', 'MyProj', '/p', 'active')" + ) + conn.commit() + _migrate(conn) + row = conn.execute("SELECT id, name, path, status FROM projects WHERE id='p1'").fetchone() + assert row is not None + assert row["name"] == "MyProj" + assert row["path"] == "/p" + assert row["status"] == "active" + conn.close() + + +def test_migrate_adds_missing_columns_before_recreation(): + """_migrate добавляет tech_stack, priority, pm_prompt, claude_md_path, forgejo_repo, created_at перед рекреацией.""" + conn = _old_schema_conn() + _migrate(conn) + cols = _cols(conn, "projects") + 
required = {"tech_stack", "priority", "pm_prompt", "claude_md_path", "forgejo_repo", "created_at"} + assert required.issubset(cols), f"Отсутствуют колонки: {required - cols}" + conn.close() + + +def test_migrate_operations_project_with_null_path(): + """После миграции можно вставить operations-проект с path=NULL.""" + conn = _old_schema_conn() + _migrate(conn) + conn.execute( + "INSERT INTO projects (id, name, path, project_type) VALUES ('ops1', 'Ops', NULL, 'operations')" + ) + conn.commit() + row = conn.execute("SELECT path, project_type FROM projects WHERE id='ops1'").fetchone() + assert row["path"] is None + assert row["project_type"] == "operations" + conn.close() + + +# --------------------------------------------------------------------------- +# Schema KIN-073: acceptance_criteria в таблице tasks +# --------------------------------------------------------------------------- + +class TestTasksAcceptanceCriteriaSchema: + """Колонка acceptance_criteria присутствует в таблице tasks.""" + + def test_schema_has_acceptance_criteria_column(self, conn): + assert "acceptance_criteria" in _cols(conn, "tasks") + + def test_acceptance_criteria_defaults_to_null(self, conn): + """Создание задачи без acceptance_criteria — поле NULL (nullable).""" + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('p1', 'P', '/p')" + ) + conn.execute( + "INSERT INTO tasks (id, project_id, title) VALUES ('t1', 'p1', 'My Task')" + ) + conn.commit() + row = conn.execute( + "SELECT acceptance_criteria FROM tasks WHERE id='t1'" + ).fetchone() + assert row["acceptance_criteria"] is None + + def test_create_task_with_acceptance_criteria_saves_field(self, conn): + """Создание задачи с acceptance_criteria — значение сохраняется в БД.""" + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('p2', 'P', '/p')" + ) + criteria = "Поле должно сохраняться. GET возвращает значение." 
+ conn.execute( + "INSERT INTO tasks (id, project_id, title, acceptance_criteria)" + " VALUES ('t2', 'p2', 'Task with criteria', ?)", + (criteria,), + ) + conn.commit() + row = conn.execute( + "SELECT acceptance_criteria FROM tasks WHERE id='t2'" + ).fetchone() + assert row["acceptance_criteria"] == criteria + + def test_get_task_returns_acceptance_criteria(self, conn): + """SELECT задачи возвращает acceptance_criteria (критерий приёмки 3).""" + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('p3', 'P', '/p')" + ) + conn.execute( + "INSERT INTO tasks (id, project_id, title, acceptance_criteria)" + " VALUES ('t3', 'p3', 'T', 'AC value')", + ) + conn.commit() + row = conn.execute("SELECT * FROM tasks WHERE id='t3'").fetchone() + assert row["acceptance_criteria"] == "AC value" + + +# --------------------------------------------------------------------------- +# Migration KIN-073: _migrate добавляет acceptance_criteria в старую схему +# --------------------------------------------------------------------------- + +def test_migrate_adds_acceptance_criteria_to_old_schema(): + """_migrate добавляет acceptance_criteria в tasks если колонки нет.""" + conn = _old_schema_conn() + _migrate(conn) + assert "acceptance_criteria" in _cols(conn, "tasks") + conn.close() + + +def test_migrate_acceptance_criteria_is_nullable_after_migration(): + """После миграции acceptance_criteria nullable — старые строки не ломаются.""" + conn = _old_schema_conn() + conn.execute( + "INSERT INTO projects (id, name, path) VALUES ('pm', 'P', '/p')" + ) + conn.execute( + "INSERT INTO tasks (id, project_id, title) VALUES ('tm', 'pm', 'Old Task')" + ) + conn.commit() + _migrate(conn) + row = conn.execute("SELECT acceptance_criteria FROM tasks WHERE id='tm'").fetchone() + assert row["acceptance_criteria"] is None + conn.close() diff --git a/tests/test_followup.py b/tests/test_followup.py index ec10d33..7422680 100644 --- a/tests/test_followup.py +++ b/tests/test_followup.py @@ -219,6 
+219,35 @@ class TestResolvePendingAction: # _run_claude with allow_write=True assert result["rerun_result"]["success"] is True + def test_manual_task_brief_has_task_type_manual_escalation(self, conn): + """brief["task_type"] должен быть 'manual_escalation' — KIN-020.""" + action = { + "type": "permission_fix", + "original_item": {"title": "Fix .dockerignore", "type": "hotfix", + "priority": 3, "brief": "Create .dockerignore"}, + } + result = resolve_pending_action(conn, "VDOL-001", action, "manual_task") + assert result is not None + assert result["brief"]["task_type"] == "manual_escalation" + + def test_manual_task_brief_includes_source(self, conn): + """brief["source"] должен содержать ссылку на родительскую задачу — KIN-020.""" + action = { + "type": "permission_fix", + "original_item": {"title": "Fix X"}, + } + result = resolve_pending_action(conn, "VDOL-001", action, "manual_task") + assert result["brief"]["source"] == "followup:VDOL-001" + + def test_manual_task_brief_includes_description(self, conn): + """brief["description"] копируется из original_item.brief — KIN-020.""" + action = { + "type": "permission_fix", + "original_item": {"title": "Fix Y", "brief": "Detailed context here"}, + } + result = resolve_pending_action(conn, "VDOL-001", action, "manual_task") + assert result["brief"]["description"] == "Detailed context here" + def test_nonexistent_task(self, conn): action = {"type": "permission_fix", "original_item": {}} assert resolve_pending_action(conn, "NOPE", action, "skip") is None @@ -261,9 +290,177 @@ class TestAutoResolvePendingActions: tasks = models.list_tasks(conn, project_id="vdol") assert len(tasks) == 2 # VDOL-001 + новая manual task + @patch("agents.runner._run_claude") + def test_escalated_manual_task_has_task_type_manual_escalation(self, mock_claude, conn): + """При эскалации после провала rerun созданная задача имеет task_type='manual_escalation' — KIN-020.""" + mock_claude.return_value = {"output": "", "returncode": 1} + action = { + 
"type": "permission_fix", + "description": "Fix X", + "original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"}, + "options": ["rerun", "manual_task", "skip"], + } + results = auto_resolve_pending_actions(conn, "VDOL-001", [action]) + + assert results[0]["resolved"] == "manual_task" + created_task = results[0]["result"] + assert created_task["brief"]["task_type"] == "manual_escalation" + @patch("agents.runner._run_claude") def test_empty_pending_actions(self, mock_claude, conn): """Пустой список — пустой результат.""" results = auto_resolve_pending_actions(conn, "VDOL-001", []) assert results == [] mock_claude.assert_not_called() + + +# --------------------------------------------------------------------------- +# KIN-068 — category наследуется при создании followup и manual задач +# --------------------------------------------------------------------------- + +class TestNextTaskIdWithCategory: + """_next_task_id с category генерирует ID в формате PROJ-CAT-NNN.""" + + @pytest.mark.parametrize("category,expected_prefix", [ + ("SEC", "VDOL-SEC-"), + ("UI", "VDOL-UI-"), + ("API", "VDOL-API-"), + ("INFRA", "VDOL-INFRA-"), + ("BIZ", "VDOL-BIZ-"), + ]) + def test_with_category_produces_cat_format(self, conn, category, expected_prefix): + """_next_task_id с category возвращает PROJ-CAT-NNN.""" + result = _next_task_id(conn, "vdol", category=category) + assert result.startswith(expected_prefix) + suffix = result[len(expected_prefix):] + assert suffix.isdigit() and len(suffix) == 3 + + def test_with_none_category_produces_plain_format(self, conn): + """_next_task_id без category возвращает PROJ-NNN (backward compat).""" + result = _next_task_id(conn, "vdol", category=None) + # VDOL-001 already exists → next is VDOL-002 + assert result == "VDOL-002" + parts = result.split("-") + assert len(parts) == 2 + assert parts[1].isdigit() + + def test_first_cat_task_is_001(self, conn): + """Первая задача категории всегда получает номер 001.""" + result = 
_next_task_id(conn, "vdol", category="DB") + assert result == "VDOL-DB-001" + + def test_cat_counter_is_per_category(self, conn): + """Счётчик независим для каждой категории.""" + models.create_task(conn, "VDOL-SEC-001", "vdol", "Security task", category="SEC") + assert _next_task_id(conn, "vdol", category="SEC") == "VDOL-SEC-002" + assert _next_task_id(conn, "vdol", category="UI") == "VDOL-UI-001" + + +class TestFollowupCategoryInheritance: + """Регрессионный тест KIN-068: followup задачи наследуют category родителя.""" + + @pytest.mark.parametrize("category", ["SEC", "UI", "API", "INFRA", "BIZ", None]) + @patch("agents.runner._run_claude") + def test_generate_followups_followup_inherits_category( + self, mock_claude, category, conn + ): + """Followup задача наследует category родительской задачи (включая None).""" + # Установить category на родительской задаче + models.update_task(conn, "VDOL-001", category=category) + + mock_claude.return_value = { + "output": json.dumps([ + {"title": "Followup task", "type": "feature", "priority": 3}, + ]), + "returncode": 0, + } + + result = generate_followups(conn, "VDOL-001") + + assert len(result["created"]) == 1 + followup = result["created"][0] + + # category должен совпадать с родительской задачей + assert followup["category"] == category + + # ID должен иметь правильный формат + if category: + assert followup["id"].startswith(f"VDOL-{category}-"), ( + f"Ожидался ID вида VDOL-{category}-NNN, получен {followup['id']!r}" + ) + else: + # Без категории: старый формат VDOL-NNN + parts = followup["id"].split("-") + assert len(parts) == 2, ( + f"Ожидался ID вида VDOL-NNN (2 части), получен {followup['id']!r}" + ) + assert parts[1].isdigit() + + @pytest.mark.parametrize("category", ["SEC", "UI", "API", "INFRA", "BIZ", None]) + def test_resolve_pending_action_manual_task_inherits_category( + self, category, conn + ): + """manual_task при resolve_pending_action наследует category родителя.""" + models.update_task(conn, "VDOL-001", 
category=category) + + action = { + "type": "permission_fix", + "original_item": { + "title": "Fix manually", + "type": "hotfix", + "priority": 4, + "brief": "Apply permissions fix", + }, + } + result = resolve_pending_action(conn, "VDOL-001", action, "manual_task") + + assert result is not None + assert result["category"] == category + + if category: + assert result["id"].startswith(f"VDOL-{category}-"), ( + f"Ожидался ID вида VDOL-{category}-NNN, получен {result['id']!r}" + ) + else: + parts = result["id"].split("-") + assert len(parts) == 2 + assert parts[1].isdigit() + + @patch("agents.runner._run_claude") + def test_generate_followups_sec_category_id_format(self, mock_claude, conn): + """Регрессионный тест KIN-068: followup задача с category=SEC получает ID VDOL-SEC-001.""" + models.update_task(conn, "VDOL-001", category="SEC") + + mock_claude.return_value = { + "output": json.dumps([{"title": "Fix SQL injection", "priority": 2}]), + "returncode": 0, + } + + result = generate_followups(conn, "VDOL-001") + + assert len(result["created"]) == 1 + followup = result["created"][0] + assert followup["id"] == "VDOL-SEC-001" + assert followup["category"] == "SEC" + + @patch("agents.runner._run_claude") + def test_generate_followups_multiple_followups_same_category(self, mock_claude, conn): + """Несколько followup задач с одной category получают инкрементальные номера.""" + models.update_task(conn, "VDOL-001", category="API") + + mock_claude.return_value = { + "output": json.dumps([ + {"title": "Add auth header", "priority": 2}, + {"title": "Add rate limit", "priority": 3}, + ]), + "returncode": 0, + } + + result = generate_followups(conn, "VDOL-001") + + assert len(result["created"]) == 2 + ids = [t["id"] for t in result["created"]] + assert ids[0] == "VDOL-API-001" + assert ids[1] == "VDOL-API-002" + for t in result["created"]: + assert t["category"] == "API" diff --git a/tests/test_hooks.py b/tests/test_hooks.py index 4a9d554..226437c 100644 --- a/tests/test_hooks.py 
+++ b/tests/test_hooks.py @@ -1,6 +1,8 @@ """Tests for core/hooks.py — post-pipeline hook execution.""" +import os import subprocess +import tempfile import pytest from unittest.mock import patch, MagicMock @@ -538,27 +540,25 @@ class TestKIN052RebuildFrontendCommand: """Хук должен сохраняться в файловой БД и быть доступен после пересоздания соединения. Симулирует рестарт: создаём хук, закрываем соединение, открываем новое — хук на месте. + Используем проект НЕ 'kin', чтобы _seed_default_hooks не мигрировал хук. """ - import tempfile - import os - from core.db import init_db - with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: db_path = f.name try: # Первое соединение — создаём проект и хук conn1 = init_db(db_path) from core import models as _models - _models.create_project(conn1, "kin", "Kin", "/projects/kin", tech_stack=["vue3"]) - cmd = "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" - hook = create_hook(conn1, "kin", "rebuild-frontend", "pipeline_completed", cmd, + _models.create_project(conn1, "kin-test", "KinTest", "/projects/kin-test", + tech_stack=["vue3"]) + cmd = "cd /projects/kin-test/web/frontend && npm run build" + hook = create_hook(conn1, "kin-test", "rebuild-frontend", "pipeline_completed", cmd, trigger_module_path=None) hook_id = hook["id"] conn1.close() # Второе соединение — «рестарт», хук должен быть на месте conn2 = init_db(db_path) - hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=True) + hooks = get_hooks(conn2, "kin-test", event="pipeline_completed", enabled_only=True) conn2.close() assert len(hooks) == 1, "После пересоздания соединения хук должен оставаться в БД" @@ -568,3 +568,337 @@ class TestKIN052RebuildFrontendCommand: assert hooks[0]["trigger_module_path"] is None finally: os.unlink(db_path) + + +# --------------------------------------------------------------------------- +# KIN-053: _seed_default_hooks — автоматический хук при инициализации БД +# 
--------------------------------------------------------------------------- + +class TestKIN053SeedDefaultHooks: + """Тесты для _seed_default_hooks (KIN-053). + + При init_db автоматически создаётся rebuild-frontend хук для проекта 'kin', + если этот проект уже существует в БД. Функция идемпотентна. + """ + + def test_seed_skipped_when_no_kin_project(self): + """_seed_default_hooks не создаёт хук, если проекта 'kin' нет.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn = init_db(db_path) + hooks = get_hooks(conn, "kin", enabled_only=False) + conn.close() + assert hooks == [] + finally: + os.unlink(db_path) + + def test_seed_creates_hook_when_kin_project_exists(self): + """_seed_default_hooks создаёт rebuild-frontend хук при наличии проекта 'kin'. + + Порядок: init_db → create_project('kin') → повторный init_db → хук есть. + KIN-003: команда теперь scripts/rebuild-frontend.sh, не cd && npm run build. + """ + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", "/projects/kin") + conn1.close() + + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=True) + conn2.close() + + assert len(hooks) == 1 + assert hooks[0]["name"] == "rebuild-frontend" + assert "rebuild-frontend.sh" in hooks[0]["command"] + finally: + os.unlink(db_path) + + def test_seed_hook_has_correct_command(self): + """Команда хука использует динамический путь из projects.path (KIN-BIZ-004). + + KIN-003: хук мигрирован на скрипт scripts/rebuild-frontend.sh + с trigger_module_path='web/frontend/*' для точного git-фильтра. + KIN-BIZ-004: путь берётся из projects.path, не захардкожен. 
+ """ + project_path = "/projects/kin" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", project_path) + conn1.close() + + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=False) + conn2.close() + + assert hooks[0]["command"] == f"{project_path}/scripts/rebuild-frontend.sh" + assert hooks[0]["trigger_module_path"] == "web/frontend/*" + assert hooks[0]["working_dir"] == project_path + assert hooks[0]["timeout_seconds"] == 300 + finally: + os.unlink(db_path) + + def test_seed_idempotent_no_duplicate(self): + """Повторные вызовы init_db не дублируют хук.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn = init_db(db_path) + models.create_project(conn, "kin", "Kin", "/projects/kin") + conn.close() + + for _ in range(3): + c = init_db(db_path) + c.close() + + conn_final = init_db(db_path) + hooks = get_hooks(conn_final, "kin", event="pipeline_completed", enabled_only=False) + conn_final.close() + + assert len(hooks) == 1, f"Ожидается 1 хук, получено {len(hooks)}" + finally: + os.unlink(db_path) + + def test_seed_hook_does_not_affect_other_projects(self): + """Seed не создаёт хуки для других проектов.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", "/projects/kin") + models.create_project(conn1, "other", "Other", "/projects/other") + conn1.close() + + conn2 = init_db(db_path) + other_hooks = get_hooks(conn2, "other", enabled_only=False) + conn2.close() + + assert other_hooks == [] + finally: + os.unlink(db_path) + + def test_seed_hook_migration_updates_existing_hook(self): + """_seed_default_hooks мигрирует существующий хук используя динамический путь (KIN-BIZ-004). 
+ + Если rebuild-frontend уже существует со старой командой (cd && npm run build), + повторный init_db должен обновить его на scripts/rebuild-frontend.sh + с путём из projects.path. + """ + project_path = "/projects/kin" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", project_path) + # Вставляем старый хук вручную (имитация состояния до KIN-003) + old_cmd = f"cd {project_path}/web/frontend && npm run build" + conn1.execute( + """INSERT INTO hooks (project_id, name, event, trigger_module_path, command, + working_dir, timeout_seconds, enabled) + VALUES ('kin', 'rebuild-frontend', 'pipeline_completed', + NULL, ?, NULL, 120, 1)""", + (old_cmd,), + ) + conn1.commit() + conn1.close() + + # Повторный init_db запускает _seed_default_hooks с миграцией + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=False) + conn2.close() + + assert len(hooks) == 1 + assert hooks[0]["command"] == f"{project_path}/scripts/rebuild-frontend.sh" + assert hooks[0]["trigger_module_path"] == "web/frontend/*" + assert hooks[0]["working_dir"] == project_path + assert hooks[0]["timeout_seconds"] == 300 + finally: + os.unlink(db_path) + + def test_seed_hook_uses_dynamic_path_not_hardcoded(self): + """Команда хука содержит путь из projects.path, а не захардкоженный /Users/grosfrumos/... (KIN-BIZ-004). + + Создаём проект с нестандартным путём и проверяем, + что хук использует именно этот путь. 
+ """ + custom_path = "/srv/custom/kin-deployment" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", custom_path) + conn1.close() + + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=False) + conn2.close() + + assert len(hooks) == 1 + assert hooks[0]["command"] == f"{custom_path}/scripts/rebuild-frontend.sh", ( + "Команда должна использовать путь из projects.path, не захардкоженный" + ) + assert hooks[0]["working_dir"] == custom_path, ( + "working_dir должен совпадать с projects.path" + ) + assert "/Users/grosfrumos" not in hooks[0]["command"], ( + "Захардкоженный путь /Users/grosfrumos не должен присутствовать в команде" + ) + finally: + os.unlink(db_path) + + +# --------------------------------------------------------------------------- +# KIN-003: changed_files — точный git-фильтр для trigger_module_path +# --------------------------------------------------------------------------- + +class TestChangedFilesMatching: + """Тесты для нового параметра changed_files в run_hooks() (KIN-003). + + Когда changed_files передан — trigger_module_path матчится по реальным + git-изменённым файлам, а не по task_modules из БД. 
+ """ + + def _make_proc(self, returncode=0, stdout="ok", stderr=""): + m = MagicMock() + m.returncode = returncode + m.stdout = stdout + m.stderr = stderr + return m + + @pytest.fixture + def frontend_trigger_hook(self, conn): + """Хук с trigger_module_path='web/frontend/*'.""" + return create_hook( + conn, "vdol", "rebuild-frontend", "pipeline_completed", + "scripts/rebuild-frontend.sh", + trigger_module_path="web/frontend/*", + working_dir="/tmp", + ) + + @patch("core.hooks.subprocess.run") + def test_hook_fires_when_frontend_file_in_changed_files( + self, mock_run, conn, frontend_trigger_hook + ): + """Хук срабатывает, если среди changed_files есть файл в web/frontend/.""" + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[], + changed_files=["web/frontend/App.vue", "core/models.py"], + ) + assert len(results) == 1 + assert results[0].name == "rebuild-frontend" + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_hook_skipped_when_no_frontend_file_in_changed_files( + self, mock_run, conn, frontend_trigger_hook + ): + """Хук НЕ срабатывает, если changed_files не содержит web/frontend/* файлов.""" + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[], + changed_files=["core/models.py", "web/api.py", "agents/runner.py"], + ) + assert len(results) == 0 + mock_run.assert_not_called() + + @patch("core.hooks.subprocess.run") + def test_hook_skipped_when_changed_files_is_empty_list( + self, mock_run, conn, frontend_trigger_hook + ): + """Пустой changed_files [] — хук с trigger_module_path не срабатывает.""" + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[{"path": "web/frontend/App.vue", "name": "App"}], + changed_files=[], # git говорит: ничего не изменилось + ) + assert 
len(results) == 0 + mock_run.assert_not_called() + + @patch("core.hooks.subprocess.run") + def test_changed_files_overrides_task_modules_match( + self, mock_run, conn, frontend_trigger_hook + ): + """Если changed_files передан, task_modules игнорируется для фильтрации. + + task_modules содержит frontend-файл, но changed_files — нет. + Хук не должен сработать: changed_files имеет приоритет. + """ + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[{"path": "web/frontend/App.vue", "name": "App"}], + changed_files=["core/models.py"], # нет frontend-файлов + ) + assert len(results) == 0, ( + "changed_files должен иметь приоритет над task_modules" + ) + mock_run.assert_not_called() + + @patch("core.hooks.subprocess.run") + def test_fallback_to_task_modules_when_changed_files_is_none( + self, mock_run, conn, frontend_trigger_hook + ): + """Если changed_files=None — используется старое поведение через task_modules.""" + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[{"path": "web/frontend/App.vue", "name": "App"}], + changed_files=None, # не передан — fallback + ) + assert len(results) == 1 + assert results[0].name == "rebuild-frontend" + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_hook_without_trigger_fires_regardless_of_changed_files( + self, mock_run, conn + ): + """Хук без trigger_module_path всегда срабатывает, даже если changed_files=[]. + + Используется для хуков, которые должны запускаться после каждого pipeline. 
+ """ + mock_run.return_value = self._make_proc() + create_hook( + conn, "vdol", "always-run", "pipeline_completed", + "echo always", + trigger_module_path=None, + working_dir="/tmp", + ) + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[], + changed_files=[], # пусто — но хук без фильтра всегда запустится + ) + assert len(results) == 1 + assert results[0].name == "always-run" + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_deep_frontend_path_matches_glob( + self, mock_run, conn, frontend_trigger_hook + ): + """Вложенные пути web/frontend/src/components/Foo.vue матчатся по 'web/frontend/*'.""" + mock_run.return_value = self._make_proc() + results = run_hooks( + conn, "vdol", "VDOL-001", + event="pipeline_completed", + task_modules=[], + changed_files=["web/frontend/src/components/TaskCard.vue"], + ) + assert len(results) == 1, ( + "fnmatch должен рекурсивно матчить 'web/frontend/*' на вложенные пути" + ) diff --git a/tests/test_kin_089_regression.py b/tests/test_kin_089_regression.py new file mode 100644 index 0000000..068d16c --- /dev/null +++ b/tests/test_kin_089_regression.py @@ -0,0 +1,377 @@ +"""Regression tests for KIN-089: 500 Internal Server Error when adding credentials. + +Root cause: DB schema had label/login/credential columns; code expected name/username/auth_value. +Fix: Migration in core/db.py (_migrate) renames columns label→name, login→username, credential→auth_value. + +Acceptance criteria: + 1. Credentials can be added without error (status 201, not 500) + 2. Credentials are stored in DB (encrypted) + 3. 
Sysadmin task brief contains environment fields for inventory +""" +import sqlite3 +import pytest +from unittest.mock import patch, MagicMock + +from core.db import init_db, _migrate + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _cols(conn: sqlite3.Connection, table: str) -> set[str]: + return {r[1] for r in conn.execute(f"PRAGMA table_info({table})").fetchall()} + + +def _conn_with_old_env_schema() -> sqlite3.Connection: + """Creates in-memory DB with OLD project_environments schema (label/login/credential).""" + conn = sqlite3.connect(":memory:") + conn.row_factory = sqlite3.Row + conn.executescript(""" + CREATE TABLE projects ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + path TEXT, + status TEXT DEFAULT 'active', + language TEXT DEFAULT 'ru', + execution_mode TEXT NOT NULL DEFAULT 'review' + ); + CREATE TABLE tasks ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', + execution_mode TEXT + ); + CREATE TABLE project_environments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + label TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER DEFAULT 22, + login TEXT NOT NULL, + auth_type TEXT NOT NULL DEFAULT 'password', + credential TEXT, + is_installed INTEGER NOT NULL DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(project_id, label) + ); + INSERT INTO projects VALUES ('corelock', 'Corelock', '/corelock', 'active', 'ru', 'review'); + INSERT INTO project_environments + (project_id, label, host, port, login, auth_type, credential, is_installed) + VALUES ('corelock', 'prod', '10.5.1.254', 22, 'pelmen', 'password', 'b64:c2VjcmV0', 0); + """) + conn.commit() + return conn + + +# --------------------------------------------------------------------------- +# Migration: 
label/login/credential → name/username/auth_value +# --------------------------------------------------------------------------- + +class TestKin089Migration: + """Regression: _migrate renames env columns from old schema to new schema.""" + + def test_migration_renames_label_to_name(self): + conn = _conn_with_old_env_schema() + _migrate(conn) + cols = _cols(conn, "project_environments") + assert "name" in cols, "After migration, 'name' column must exist" + assert "label" not in cols, "After migration, 'label' column must not exist" + conn.close() + + def test_migration_renames_login_to_username(self): + conn = _conn_with_old_env_schema() + _migrate(conn) + cols = _cols(conn, "project_environments") + assert "username" in cols, "After migration, 'username' column must exist" + assert "login" not in cols, "After migration, 'login' column must not exist" + conn.close() + + def test_migration_renames_credential_to_auth_value(self): + conn = _conn_with_old_env_schema() + _migrate(conn) + cols = _cols(conn, "project_environments") + assert "auth_value" in cols, "After migration, 'auth_value' column must exist" + assert "credential" not in cols, "After migration, 'credential' column must not exist" + conn.close() + + def test_migration_preserves_existing_data(self): + """After migration, existing env rows must be accessible with new column names.""" + conn = _conn_with_old_env_schema() + _migrate(conn) + row = conn.execute( + "SELECT name, username, auth_value FROM project_environments WHERE project_id = 'corelock'" + ).fetchone() + assert row is not None, "Existing row must survive migration" + assert row["name"] == "prod" + assert row["username"] == "pelmen" + assert row["auth_value"] == "b64:c2VjcmV0" + conn.close() + + def test_migration_is_idempotent_on_new_schema(self): + """Calling _migrate on a DB that already has new schema must not fail.""" + conn = init_db(":memory:") + before = _cols(conn, "project_environments") + _migrate(conn) + after = _cols(conn, 
"project_environments") + assert before == after, "_migrate must not alter schema when new columns already exist" + conn.close() + + def test_migration_preserves_unique_constraint(self): + """After migration, UNIQUE(project_id, name) constraint must still work.""" + conn = _conn_with_old_env_schema() + _migrate(conn) + with pytest.raises(sqlite3.IntegrityError): + conn.execute( + "INSERT INTO project_environments (project_id, name, host, username) " + "VALUES ('corelock', 'prod', '1.2.3.4', 'root')" + ) + conn.close() + + +# --------------------------------------------------------------------------- +# Endpoint regression: POST /environments must return 201, not 500 +# --------------------------------------------------------------------------- + +@pytest.fixture +def client(tmp_path): + import web.api as api_module + api_module.DB_PATH = tmp_path / "test.db" + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "corelock", "name": "Corelock", "path": "/corelock"}) + return c + + +def test_create_environment_returns_201_not_500(client): + """Regression KIN-089: POST /environments must not return 500.""" + r = client.post("/api/projects/corelock/environments", json={ + "name": "prod", + "host": "10.5.1.254", + "username": "pelmen", + "port": 22, + "auth_type": "password", + "auth_value": "s3cr3t", + "is_installed": False, + }) + assert r.status_code == 201, f"Expected 201, got {r.status_code}: {r.text}" + + +def test_create_environment_missing_kin_secret_key_returns_503(tmp_path): + """When KIN_SECRET_KEY is not set, POST /environments must return 503, not 500. + + 503 = server misconfiguration (operator error), not 500 (code bug). 
+ """ + import os + import web.api as api_module + api_module.DB_PATH = tmp_path / "test503.db" + from web.api import app + from fastapi.testclient import TestClient + + env_without_key = {k: v for k, v in os.environ.items() if k != "KIN_SECRET_KEY"} + with patch.dict(os.environ, env_without_key, clear=True): + c = TestClient(app) + c.post("/api/projects", json={"id": "corelock", "name": "Corelock", "path": "/corelock"}) + r = c.post("/api/projects/corelock/environments", json={ + "name": "prod", + "host": "10.5.1.254", + "username": "pelmen", + "auth_value": "secret", + }) + assert r.status_code == 503, ( + f"Missing KIN_SECRET_KEY must return 503 (not 500 or other), got {r.status_code}: {r.text}" + ) + + +# --------------------------------------------------------------------------- +# AC: Credentials stored in DB +# --------------------------------------------------------------------------- + +def test_create_environment_auth_value_encrypted_in_db(client): + """AC: auth_value is stored encrypted in DB, not plain text.""" + import web.api as api_module + from core.db import init_db + from core import models as m + + r = client.post("/api/projects/corelock/environments", json={ + "name": "db-creds-test", + "host": "10.5.1.254", + "username": "pelmen", + "auth_value": "supersecret", + }) + assert r.status_code == 201 + env_id = r.json()["id"] + + conn = init_db(api_module.DB_PATH) + row = conn.execute( + "SELECT auth_value FROM project_environments WHERE id = ?", (env_id,) + ).fetchone() + conn.close() + + assert row["auth_value"] is not None, "auth_value must be stored in DB" + assert row["auth_value"] != "supersecret", "auth_value must NOT be stored as plain text" + + +def test_create_environment_auth_value_hidden_in_response(client): + """AC: auth_value is never returned in API response.""" + r = client.post("/api/projects/corelock/environments", json={ + "name": "hidden-creds", + "host": "10.5.1.254", + "username": "pelmen", + "auth_value": "supersecret", + }) + 
assert r.status_code == 201 + assert r.json().get("auth_value") is None, "auth_value must be None in response" + + +def test_create_environment_stored_credential_is_decryptable(client): + """AC: Stored credential can be decrypted back to original value.""" + import web.api as api_module + from core.db import init_db + from core import models as m + + r = client.post("/api/projects/corelock/environments", json={ + "name": "decrypt-test", + "host": "10.5.1.254", + "username": "pelmen", + "auth_value": "mypassword123", + }) + assert r.status_code == 201 + env_id = r.json()["id"] + + conn = init_db(api_module.DB_PATH) + row = conn.execute( + "SELECT auth_value FROM project_environments WHERE id = ?", (env_id,) + ).fetchone() + conn.close() + + decrypted = m._decrypt_auth(row["auth_value"]) + assert decrypted == "mypassword123", "Stored credential must decrypt to original value" + + +# --------------------------------------------------------------------------- +# AC: Sysadmin sees environment fields in context for inventory +# --------------------------------------------------------------------------- + +def test_sysadmin_task_created_with_env_fields_in_brief(client): + """AC: When is_installed=True, sysadmin task brief contains host and username.""" + import web.api as api_module + from core.db import init_db + from core import models as m + + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=12345) + r = client.post("/api/projects/corelock/environments", json={ + "name": "prod-scan", + "host": "10.5.1.254", + "username": "pelmen", + "is_installed": True, + }) + + assert r.status_code == 201 + assert "scan_task_id" in r.json(), "scan_task_id must be returned when is_installed=True" + task_id = r.json()["scan_task_id"] + + conn = init_db(api_module.DB_PATH) + task = m.get_task(conn, task_id) + conn.close() + + assert task is not None, "Sysadmin task must be created in DB" + assert task["assigned_role"] == "sysadmin" + assert 
task["category"] == "INFRA" + + brief = task["brief"] + brief_str = str(brief) + assert "10.5.1.254" in brief_str, "Sysadmin brief must contain host for inventory" + assert "pelmen" in brief_str, "Sysadmin brief must contain username for inventory" + + +def test_sysadmin_task_brief_is_dict_not_string(client): + """Sysadmin task brief must be a structured dict (not raw string) for agent parsing.""" + import web.api as api_module + from core.db import init_db + from core import models as m + + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=99999) + r = client.post("/api/projects/corelock/environments", json={ + "name": "brief-type-test", + "host": "10.5.1.1", + "username": "root", + "is_installed": True, + }) + + task_id = r.json()["scan_task_id"] + conn = init_db(api_module.DB_PATH) + task = m.get_task(conn, task_id) + conn.close() + + assert isinstance(task["brief"], dict), ( + f"Sysadmin task brief must be a dict, got {type(task['brief'])}" + ) + + +def test_post_migration_create_environment_works(tmp_path): + """AC: After DB migration from old schema, create_environment works end-to-end.""" + import web.api as api_module + from fastapi.testclient import TestClient + + # Set up DB with old schema using a file-based DB (to test init_db migration path) + old_db_path = tmp_path / "old.db" + conn = sqlite3.connect(str(old_db_path)) + conn.row_factory = sqlite3.Row + conn.executescript(""" + CREATE TABLE projects ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + path TEXT, + status TEXT DEFAULT 'active', + language TEXT DEFAULT 'ru', + execution_mode TEXT NOT NULL DEFAULT 'review' + ); + CREATE TABLE tasks ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + title TEXT NOT NULL, + status TEXT DEFAULT 'pending', + execution_mode TEXT + ); + CREATE TABLE project_environments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + project_id TEXT NOT NULL REFERENCES projects(id), + label TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER 
DEFAULT 22, + login TEXT NOT NULL, + auth_type TEXT NOT NULL DEFAULT 'password', + credential TEXT, + is_installed INTEGER NOT NULL DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(project_id, label) + ); + INSERT INTO projects VALUES ('corelock', 'Corelock', '/corelock', 'active', 'ru', 'review'); + """) + conn.commit() + conn.close() + + # Switch API to use the old DB — init_db will run _migrate on it + api_module.DB_PATH = old_db_path + from web.api import app + c = TestClient(app) + + # Trigger init_db migration by making a request + r = c.post("/api/projects/corelock/environments", json={ + "name": "prod", + "host": "10.5.1.254", + "username": "pelmen", + "auth_value": "topsecret", + }) + assert r.status_code == 201, ( + f"After migration from old schema, create_environment must return 201, got {r.status_code}: {r.text}" + ) + assert r.json()["name"] == "prod" + assert r.json()["username"] == "pelmen" diff --git a/tests/test_kin_091_regression.py b/tests/test_kin_091_regression.py new file mode 100644 index 0000000..6e68a5b --- /dev/null +++ b/tests/test_kin_091_regression.py @@ -0,0 +1,551 @@ +""" +Regression tests for KIN-091: +(1) Revise button — feedback loop, revise_count, target_role, max limit +(2) Auto-test before review — _run_project_tests, fix loop, block on exhaustion +(3) Spec-driven workflow — route exists and has correct steps in specialists.yaml +(4) Git worktrees — create/merge/cleanup/ensure_gitignore with mocked subprocess +(5) Auto-trigger pipeline — task with label 'auto' triggers pipeline on creation +""" + +import json +import subprocess +import pytest +from pathlib import Path +from unittest.mock import patch, MagicMock, call + +import web.api as api_module + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def client(tmp_path): 
+ db_path = tmp_path / "test.db" + api_module.DB_PATH = db_path + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/tmp/p1"}) + c.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"}) + return c + + +@pytest.fixture +def conn(): + from core.db import init_db + from core import models + c = init_db(":memory:") + models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek", + tech_stack=["vue3"]) + models.create_task(c, "VDOL-001", "vdol", "Fix bug", + brief={"route_type": "debug"}) + yield c + c.close() + + +# --------------------------------------------------------------------------- +# (1) Revise button — revise_count, target_role, max limit +# --------------------------------------------------------------------------- + +class TestReviseEndpoint: + def test_revise_increments_revise_count(self, client): + """revise_count начинается с 0 и увеличивается на 1 при каждом вызове.""" + r = client.post("/api/tasks/P1-001/revise", json={"comment": "ещё раз"}) + assert r.status_code == 200 + assert r.json()["revise_count"] == 1 + + r = client.post("/api/tasks/P1-001/revise", json={"comment": "и ещё"}) + assert r.status_code == 200 + assert r.json()["revise_count"] == 2 + + def test_revise_stores_target_role(self, client): + """target_role сохраняется в задаче в БД.""" + from core.db import init_db + from core import models + r = client.post("/api/tasks/P1-001/revise", json={ + "comment": "доработай бэкенд", + "target_role": "backend_dev", + }) + assert r.status_code == 200 + + conn = init_db(api_module.DB_PATH) + row = conn.execute( + "SELECT revise_target_role FROM tasks WHERE id = 'P1-001'" + ).fetchone() + conn.close() + assert row["revise_target_role"] == "backend_dev" + + def test_revise_target_role_builds_short_steps(self, client): + """Если передан target_role, pipeline_steps = [target_role, reviewer].""" + r = 
client.post("/api/tasks/P1-001/revise", json={ + "comment": "фикс", + "target_role": "frontend_dev", + }) + assert r.status_code == 200 + steps = r.json()["pipeline_steps"] + roles = [s["role"] for s in steps] + assert roles == ["frontend_dev", "reviewer"] + + def test_revise_max_count_exceeded_returns_400(self, client): + """После 5 ревизий следующий вызов возвращает 400.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", revise_count=5) + conn.close() + + r = client.post("/api/tasks/P1-001/revise", json={"comment": "6-й"}) + assert r.status_code == 400 + assert "Max revisions" in r.json()["detail"] + + def test_revise_sets_status_in_progress(self, client): + """После /revise задача переходит в статус in_progress.""" + r = client.post("/api/tasks/P1-001/revise", json={"comment": "исправь"}) + assert r.status_code == 200 + assert r.json()["status"] == "in_progress" + + def test_revise_only_visible_for_review_done_tasks(self, client): + """Задача со статусом 'review' возвращает 200, а не 404.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="review") + conn.close() + + r = client.post("/api/tasks/P1-001/revise", json={"comment": "review→revise"}) + assert r.status_code == 200 + + def test_revise_done_task_allowed(self, client): + """Задача со статусом 'done' тоже может быть ревизована.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="done") + conn.close() + + r = client.post("/api/tasks/P1-001/revise", json={"comment": "done→revise"}) + assert r.status_code == 200 + assert r.json()["status"] == "in_progress" + + +# --------------------------------------------------------------------------- +# (2) Auto-test before review — _run_project_tests, fix loop, block +# 
# ---------------------------------------------------------------------------

class TestRunProjectTests:
    """Unit tests for agents.runner._run_project_tests (wrapper around `make test`)."""

    def test_returns_success_when_make_exits_0(self):
        """Exit code 0 from make maps to success=True with stdout in output."""
        from agents.runner import _run_project_tests
        fake_proc = MagicMock(returncode=0, stdout="All tests passed.", stderr="")
        with patch("agents.runner.subprocess.run", return_value=fake_proc):
            outcome = _run_project_tests("/fake/path")
        assert outcome["success"] is True
        assert "All tests passed." in outcome["output"]

    def test_returns_failure_when_make_exits_nonzero(self):
        """Non-zero exit code maps to success=False with stderr in output."""
        from agents.runner import _run_project_tests
        fake_proc = MagicMock(returncode=2, stdout="", stderr="FAILED 3 tests")
        with patch("agents.runner.subprocess.run", return_value=fake_proc):
            outcome = _run_project_tests("/fake/path")
        assert outcome["success"] is False
        assert "FAILED" in outcome["output"]

    def test_handles_make_not_found(self):
        """Missing `make` binary yields success=False and the conventional 127."""
        from agents.runner import _run_project_tests
        with patch("agents.runner.subprocess.run", side_effect=FileNotFoundError):
            outcome = _run_project_tests("/fake/path")
        assert outcome["success"] is False
        assert outcome["returncode"] == 127

    def test_handles_timeout(self):
        """A TimeoutExpired yields success=False and the conventional 124."""
        from agents.runner import _run_project_tests
        timeout_exc = subprocess.TimeoutExpired(cmd="make", timeout=120)
        with patch("agents.runner.subprocess.run", side_effect=timeout_exc):
            outcome = _run_project_tests("/fake/path", timeout=120)
        assert outcome["success"] is False
        assert outcome["returncode"] == 124


def _mock_success(output="done"):
    """Fake CompletedProcess for a successful agent subprocess (JSON result on stdout)."""
    proc = MagicMock()
    proc.stdout = json.dumps({"result": output})
    proc.stderr = ""
    proc.returncode = 0
    return proc


def _mock_failure(msg="error"):
    """Fake CompletedProcess for a failed agent subprocess (message on stderr)."""
    proc = MagicMock()
    proc.stdout = ""
    proc.stderr = msg
    proc.returncode = 1
    return proc


class TestAutoTestInPipeline:
    """Pipeline with auto_test_enabled: tests run automatically after a dev step."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_passes_pipeline_continues(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """When the auto-test passes, the pipeline finishes successfully."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        models.update_project(conn, "vdol", auto_test_enabled=True)

        outcome = run_pipeline(conn, "VDOL-001", [{"role": "backend_dev", "brief": "implement"}])

        assert outcome["success"] is True
        mock_tests.assert_called_once()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_disabled_not_called(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """When auto_test_enabled is off (the default 0), `make test` is never run."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()

        run_pipeline(conn, "VDOL-001", [{"role": "backend_dev", "brief": "implement"}])

        mock_tests.assert_not_called()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_fail_triggers_fix_loop(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """A failing auto-test spawns a fixer agent and re-runs the tests."""
        from agents.runner import run_pipeline
        from core import models
        import os
        mock_run.return_value = _mock_success()
        # First test run fails, the retry after the fixer passes.
        mock_tests.side_effect = [
            {"success": False, "output": "FAILED: test_foo", "returncode": 1},
            {"success": True, "output": "OK", "returncode": 0},
        ]
        models.update_project(conn, "vdol", auto_test_enabled=True)

        with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "3"}):
            outcome = run_pipeline(conn, "VDOL-001", [{"role": "backend_dev", "brief": "implement"}])

        assert outcome["success"] is True
        # _run_project_tests ran twice: initial check + after the fix.
        assert mock_tests.call_count == 2
        # subprocess.run ran at least twice: backend_dev + fixer backend_dev.
        assert mock_run.call_count >= 2

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_exhausted_blocks_task(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """After max_attempts consecutive test failures the task gets blocked."""
        from agents.runner import run_pipeline
        from core import models
        import os

        mock_run.return_value = _mock_success()
        # The test suite never passes.
        mock_tests.return_value = {"success": False, "output": "FAILED", "returncode": 1}
        models.update_project(conn, "vdol", auto_test_enabled=True)

        with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "2"}):
            outcome = run_pipeline(conn, "VDOL-001", [{"role": "backend_dev", "brief": "implement"}])

        assert outcome["success"] is False
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "blocked"
        assert "Auto-test" in (task.get("blocked_reason") or "")

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_not_triggered_for_non_dev_roles(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """auto_test fires only for backend_dev/frontend_dev, not for debugger."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        models.update_project(conn, "vdol", auto_test_enabled=True)

        run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])

        mock_tests.assert_not_called()


# ---------------------------------------------------------------------------
# (3) Spec-driven workflow route
# ---------------------------------------------------------------------------

class TestSpecDrivenRoute:
    def _load_specialists(self):
        """Load agents/specialists.yaml relative to the tests directory."""
        import yaml
        spec_path = Path(__file__).parent.parent / "agents" / "specialists.yaml"
        with open(spec_path) as f:
            return yaml.safe_load(f)

    def test_spec_driven_route_exists(self):
        """The spec_driven route must be declared in specialists.yaml."""
        data = self._load_specialists()
        assert "spec_driven" in data.get("routes", {})

    def test_spec_driven_route_steps_order(self):
        """spec_driven route steps: [constitution, spec, architect, task_decomposer]."""
        data = self._load_specialists()
        assert data["routes"]["spec_driven"]["steps"] == [
            "constitution", "spec", "architect", "task_decomposer",
        ]

    def test_spec_driven_all_roles_exist(self):
        """Every role used by the spec_driven route must exist under specialists."""
        data = self._load_specialists()
        declared = data.get("specialists", {})
        for role in data["routes"]["spec_driven"]["steps"]:
            assert role in declared, f"Role '{role}' missing from specialists"

    def test_constitution_role_has_output_schema(self):
        """constitution must expose an output_schema with principles/constraints/goals."""
        data = self._load_specialists()
        schema = data["specialists"]["constitution"].get("output_schema", {})
        assert "principles" in schema
        assert "constraints" in schema
        assert "goals" in schema

    def test_spec_role_has_output_schema(self):
        """spec must expose an output_schema with overview/features/api_contracts."""
        data = self._load_specialists()
        schema = data["specialists"]["spec"].get("output_schema", {})
        assert "overview" in schema
        assert "features" in schema
        assert "api_contracts" in schema


# ---------------------------------------------------------------------------
# (4) Git worktrees — create / merge / cleanup / ensure_gitignore
# ---------------------------------------------------------------------------

class TestCreateWorktree:
    def test_create_worktree_success(self, tmp_path):
        """create_worktree returns a path when `git worktree add` succeeds."""
        from core.worktree import create_worktree
        ok = MagicMock(returncode=0, stderr="")
        with patch("core.worktree.subprocess.run", return_value=ok):
            wt_path = create_worktree(str(tmp_path), "TASK-001", "backend_dev")
        assert wt_path is not None
        assert "TASK-001-backend_dev" in wt_path

    def test_create_worktree_git_failure_returns_none(self, tmp_path):
        """create_worktree returns None when `git worktree add` fails."""
        from core.worktree import create_worktree
        failed = MagicMock(returncode=128, stderr="fatal: branch already exists")
        with patch("core.worktree.subprocess.run", return_value=failed):
            wt_path = create_worktree(str(tmp_path), "TASK-001", "backend_dev")
        assert wt_path is None

    def test_create_worktree_exception_returns_none(self, tmp_path):
        """create_worktree swallows unexpected exceptions and returns None."""
        from core.worktree import create_worktree
        with patch("core.worktree.subprocess.run", side_effect=OSError("no git")):
            wt_path = create_worktree(str(tmp_path), "TASK-001", "backend_dev")
        assert wt_path is None

    def test_create_worktree_branch_name_sanitized(self, tmp_path):
        """Slashes and spaces in the step name are replaced with underscores."""
        from core.worktree import create_worktree
        ok = MagicMock(returncode=0, stderr="")
        recorded = []

        def capture(*args, **kwargs):
            recorded.append(args[0])
            return ok

        with patch("core.worktree.subprocess.run", side_effect=capture):
            create_worktree(str(tmp_path), "TASK-001", "step/with spaces")
        assert recorded
        cmd = recorded[0]
        branch = cmd[cmd.index("-b") + 1]
        assert "/" not in branch
        assert " " not in branch


class TestMergeWorktree:
    def test_merge_success_returns_merged_files(self, tmp_path):
        """On a clean merge the result carries success=True and the merged file list."""
        from core.worktree import merge_worktree
        worktree = str(tmp_path / "TASK-001-backend_dev")

        merge_ok = MagicMock(returncode=0, stdout="", stderr="")
        diff_ok = MagicMock(returncode=0, stdout="src/api.py\nsrc/models.py\n", stderr="")

        with patch("core.worktree.subprocess.run", side_effect=[merge_ok, diff_ok]):
            result = merge_worktree(worktree, str(tmp_path))

        assert result["success"] is True
        assert "src/api.py" in result["merged_files"]
        assert result["conflicts"] == []

    def test_merge_conflict_returns_conflict_list(self, tmp_path):
        """On a conflict the result carries success=False and the conflicting files."""
        from core.worktree import merge_worktree
        worktree = str(tmp_path / "TASK-001-backend_dev")

        merge_fail = MagicMock(returncode=1, stdout="", stderr="CONFLICT")
        conflict_files = MagicMock(returncode=0, stdout="src/models.py\n", stderr="")
        abort = MagicMock(returncode=0)

        with patch("core.worktree.subprocess.run",
                   side_effect=[merge_fail, conflict_files, abort]):
            result = merge_worktree(worktree, str(tmp_path))

        assert result["success"] is False
        assert "src/models.py" in result["conflicts"]

    def test_merge_exception_returns_success_false(self, tmp_path):
        """merge_worktree never raises; errors are reported in the result dict."""
        from core.worktree import merge_worktree
        with patch("core.worktree.subprocess.run", side_effect=OSError("git died")):
            result = merge_worktree("/fake/wt", str(tmp_path))
        assert result["success"] is False
        assert "error" in result


class TestCleanupWorktree:
    def test_cleanup_calls_worktree_remove_and_branch_delete(self, tmp_path):
        """cleanup_worktree issues `git worktree remove` followed by `git branch -D`."""
        from core.worktree import cleanup_worktree
        issued = []

        def capture(*args, **kwargs):
            issued.append(args[0])
            return MagicMock(returncode=0)

        with patch("core.worktree.subprocess.run", side_effect=capture):
            cleanup_worktree("/fake/path/TASK-branch", str(tmp_path))
        assert len(issued) == 2
        # first command: worktree remove
        assert "worktree" in issued[0]
        assert "remove" in issued[0]
        # second command: branch -D
        assert "branch" in issued[1]
        assert "-D" in issued[1]

    def test_cleanup_never_raises(self, tmp_path):
        """cleanup_worktree stays silent on subprocess errors."""
        from core.worktree import cleanup_worktree
        with patch("core.worktree.subprocess.run", side_effect=OSError("crashed")):
            cleanup_worktree("/fake/wt", str(tmp_path))  # must not raise


class TestEnsureGitignore:
    def test_adds_entry_to_existing_gitignore(self, tmp_path):
        """ensure_gitignore appends .kin_worktrees/ to an existing .gitignore."""
        from core.worktree import ensure_gitignore
        gi = tmp_path / ".gitignore"
        gi.write_text("*.pyc\n__pycache__/\n")
        ensure_gitignore(str(tmp_path))
        assert ".kin_worktrees/" in gi.read_text()

    def test_creates_gitignore_if_missing(self, tmp_path):
        """ensure_gitignore creates .gitignore when it does not exist."""
        from core.worktree import ensure_gitignore
        ensure_gitignore(str(tmp_path))
        gi = tmp_path / ".gitignore"
        assert gi.exists()
        assert ".kin_worktrees/" in gi.read_text()

    def test_skips_if_entry_already_present(self, tmp_path):
        """ensure_gitignore does not duplicate an existing entry."""
        from core.worktree import ensure_gitignore
        gi = tmp_path / ".gitignore"
        gi.write_text(".kin_worktrees/\n")
        ensure_gitignore(str(tmp_path))
        assert gi.read_text().count(".kin_worktrees/") == 1

    def test_never_raises_on_permission_error(self, tmp_path):
        """ensure_gitignore stays silent when the file cannot be written."""
        from core.worktree import ensure_gitignore
        with patch("core.worktree.Path.open", side_effect=PermissionError):
            ensure_gitignore(str(tmp_path))  # must not raise


# ---------------------------------------------------------------------------
# (5) Auto-trigger pipeline — label 'auto'
# ---------------------------------------------------------------------------

class TestAutoTrigger:
    def test_task_with_auto_label_triggers_pipeline(self, client):
        """Creating a task labelled 'auto' launches the pipeline in the background."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            r = client.post("/api/tasks", json={
                "project_id": "p1",
                "title": "Auto task",
                "labels": ["auto"],
            })
            assert r.status_code == 200
            mock_launch.assert_called_once()
            launched_id = mock_launch.call_args[0][0]
            assert launched_id.startswith("P1-")

    def test_task_without_auto_label_does_not_trigger(self, client):
        """A task without the 'auto' label does NOT launch the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            r = client.post("/api/tasks", json={
                "project_id": "p1",
                "title": "Manual task",
                "labels": ["feature"],
            })
            assert r.status_code == 200
            mock_launch.assert_not_called()

    def test_task_without_labels_does_not_trigger(self, client):
        """A task with no labels at all does NOT launch the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            r = client.post("/api/tasks", json={
                "project_id": "p1",
                "title": "Plain task",
            })
            assert r.status_code == 200
            mock_launch.assert_not_called()

    def test_task_with_auto_among_multiple_labels_triggers(self, client):
        """A multi-label task that includes 'auto' launches the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            r = client.post("/api/tasks", json={
                "project_id": "p1",
                "title": "Multi-label auto task",
                "labels": ["feature", "auto", "backend"],
            })
            assert r.status_code == 200
            mock_launch.assert_called_once()
diff --git a/tests/test_kin_biz_002.py b/tests/test_kin_biz_002.py new file mode 100644 index 0000000..bd1b2e0 --- /dev/null +++ b/tests/test_kin_biz_002.py @@ -0,0 +1,200 @@ +"""Regression tests for KIN-BIZ-002. + +Проблема: approve через /tasks/{id}/approve не продвигал phase state machine. +Фикс: в approve_task() добавлен блок, вызывающий approve_phase() из core.phases + если задача принадлежит активной фазе. + В approve_phase() endpoint добавлена синхронизация task.status='done'. + +Тесты покрывают: + 1. POST /tasks/{id}/approve для phase-задачи → phase.status=done, следующая фаза active + 2. Изменения в БД персистентны после approve + 3. POST /tasks/{id}/approve для обычной задачи → не ломает ничего, phase=None + 4. POST /phases/{id}/approve → task.status синхронизируется в done +""" + +import pytest +from fastapi.testclient import TestClient + +import web.api as api_module + + +@pytest.fixture +def client(tmp_path): + """Изолированная временная БД для каждого теста.""" + db_path = tmp_path / "test_biz002.db" + api_module.DB_PATH = db_path + from web.api import app + return TestClient(app) + + +def _create_project_with_phases(client, project_id: str = "proj_biz002") -> dict: + """Вспомогательная: создаёт проект с двумя researcher-фазами + architect.""" + r = client.post("/api/projects/new", json={ + "id": project_id, + "name": "BIZ-002 Test Project", + "path": f"/tmp/{project_id}", + "description": "Тест регрессии KIN-BIZ-002", + "roles": ["business_analyst", "tech_researcher"], + }) + assert r.status_code == 200, r.json() + return r.json() + + +def _get_active_phase(client, project_id: str) -> dict: + """Вспомогательная: возвращает первую активную фазу.""" + phases = client.get(f"/api/projects/{project_id}/phases").json() + active = next(ph for ph in phases if ph["status"] == "active") + return active + + +# --------------------------------------------------------------------------- +# KIN-BIZ-002 — регрессионные тесты +# 
--------------------------------------------------------------------------- + + +def test_KIN_BIZ_002_approve_task_advances_phase_state_machine(client): + """KIN-BIZ-002: POST /tasks/{id}/approve для phase-задачи продвигает state machine. + + Ожидаем: phase.status=approved, next_phase активирован. + """ + _create_project_with_phases(client) + active_phase = _get_active_phase(client, "proj_biz002") + task_id = active_phase["task_id"] + + r = client.post(f"/api/tasks/{task_id}/approve", json={}) + + assert r.status_code == 200 + data = r.json() + assert data["status"] == "done" + # Ключ phase должен присутствовать и содержать результат + assert "phase" in data + assert data["phase"] is not None + # Одобренная фаза имеет status=approved + assert data["phase"]["phase"]["status"] == "approved" + # Следующая фаза была активирована + assert data["phase"]["next_phase"] is not None + assert data["phase"]["next_phase"]["status"] == "active" + + +def test_KIN_BIZ_002_approve_task_phase_status_persists_in_db(client): + """KIN-BIZ-002: после approve через /tasks/{id}/approve статусы фаз корректны в БД. + + Первая фаза → approved, вторая фаза → active. 
+ """ + data = _create_project_with_phases(client) + # Три фазы: business_analyst, tech_researcher, architect + assert len(data["phases"]) == 3 + + active_phase = _get_active_phase(client, "proj_biz002") + task_id = active_phase["task_id"] + + client.post(f"/api/tasks/{task_id}/approve", json={}) + + # Перечитываем фазы из БД + phases = client.get("/api/projects/proj_biz002/phases").json() + statuses = {ph["role"]: ph["status"] for ph in phases} + + assert statuses["business_analyst"] == "approved" + assert statuses["tech_researcher"] == "active" + assert statuses["architect"] == "pending" + + +def test_KIN_BIZ_002_approve_task_task_status_is_done(client): + """KIN-BIZ-002: сама задача должна иметь status=done после approve.""" + _create_project_with_phases(client) + active_phase = _get_active_phase(client, "proj_biz002") + task_id = active_phase["task_id"] + + client.post(f"/api/tasks/{task_id}/approve", json={}) + + task = client.get(f"/api/tasks/{task_id}").json() + assert task["status"] == "done" + + +def test_KIN_BIZ_002_approve_regular_task_does_not_affect_phases(client): + """KIN-BIZ-002: approve обычной задачи (без фазы) не ломает ничего, phase=None.""" + # Создаём обычный проект без фаз + client.post("/api/projects", json={ + "id": "plain_proj", + "name": "Plain Project", + "path": "/tmp/plain_proj", + }) + r_task = client.post("/api/tasks", json={ + "project_id": "plain_proj", + "title": "Обычная задача без фазы", + }) + assert r_task.status_code == 200 + task_id = r_task.json()["id"] + + r = client.post(f"/api/tasks/{task_id}/approve", json={}) + + assert r.status_code == 200 + data = r.json() + assert data["status"] == "done" + # phase должен быть None — нет связанной фазы + assert data["phase"] is None + + +def test_KIN_BIZ_002_approve_regular_task_sets_status_done(client): + """KIN-BIZ-002: approve обычной задачи корректно устанавливает status=done.""" + client.post("/api/projects", json={ + "id": "plain2", + "name": "Plain2", + "path": "/tmp/plain2", 
+ }) + r_task = client.post("/api/tasks", json={ + "project_id": "plain2", + "title": "Задача без фазы", + }) + task_id = r_task.json()["id"] + + client.post(f"/api/tasks/{task_id}/approve", json={}) + + task = client.get(f"/api/tasks/{task_id}").json() + assert task["status"] == "done" + + +def test_KIN_BIZ_002_approve_phase_endpoint_syncs_task_status_to_done(client): + """KIN-BIZ-002: POST /phases/{id}/approve синхронизирует task.status=done. + + Гарантируем консистентность обоих путей одобрения фазы. + """ + _create_project_with_phases(client) + active_phase = _get_active_phase(client, "proj_biz002") + phase_id = active_phase["id"] + task_id = active_phase["task_id"] + + r = client.post(f"/api/phases/{phase_id}/approve", json={}) + assert r.status_code == 200 + + # Задача, связанная с фазой, должна иметь status=done + task = client.get(f"/api/tasks/{task_id}").json() + assert task["status"] == "done" + + +def test_KIN_BIZ_002_full_phase_chain_two_approves_completes_workflow(client): + """KIN-BIZ-002: последовательный approve через /tasks/{id}/approve проходит весь chain. + + business_analyst → approved → tech_researcher → approved → architect → approved. 
+ """ + _create_project_with_phases(client) + phases_init = client.get("/api/projects/proj_biz002/phases").json() + assert len(phases_init) == 3 + + # Апруваем каждую фазу последовательно через task-endpoint + for _ in range(3): + phases = client.get("/api/projects/proj_biz002/phases").json() + active = next((ph for ph in phases if ph["status"] == "active"), None) + if active is None: + break + task_id = active["task_id"] + r = client.post(f"/api/tasks/{task_id}/approve", json={}) + assert r.status_code == 200 + assert r.json()["status"] == "done" + + # После всех approve все фазы должны быть approved + final_phases = client.get("/api/projects/proj_biz002/phases").json() + for ph in final_phases: + assert ph["status"] == "approved", ( + f"Ожидали approved для {ph['role']}, получили {ph['status']}" + ) diff --git a/tests/test_kin_biz_007_fernet.py b/tests/test_kin_biz_007_fernet.py new file mode 100644 index 0000000..2609bde --- /dev/null +++ b/tests/test_kin_biz_007_fernet.py @@ -0,0 +1,388 @@ +"""Tests for KIN-BIZ-007: Fernet encryption of credentials in project_environments. + +Acceptance criteria: +1. Roundtrip: _encrypt_auth → _decrypt_auth returns the original string. +2. Migration: b64:-prefixed record is auto-re-encrypted on read; decrypt returns plaintext. +3. Missing KIN_SECRET_KEY → scan endpoint returns 503 (not 500). +4. Runner path: get_environment() returns decrypted plaintext auth_value. +5. Old _obfuscate_auth / _deobfuscate_auth are not present anywhere. + +Decision #214: patch на consuming-модуле, не на defining. +Decision #215: использовать mock.assert_called_once(). 
+""" + +import base64 +import os +import pytest +from unittest.mock import patch, MagicMock + +from core.db import init_db +from core import models + + +@pytest.fixture +def conn(): + """Fresh in-memory DB for each test.""" + c = init_db(db_path=":memory:") + yield c + c.close() + + +@pytest.fixture +def conn_with_project(conn): + """In-memory DB with a test project.""" + models.create_project(conn, "testproj", "Test Project", "/test") + return conn + + +@pytest.fixture +def scan_client(tmp_path): + """TestClient with project + environment pre-created. Returns (client, env_id).""" + import web.api as api_module + api_module.DB_PATH = tmp_path / "scan_biz007.db" + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "scanproj", "name": "Scan Project", "path": "/scan"}) + r = c.post("/api/projects/scanproj/environments", json={ + "name": "prod", "host": "10.0.0.1", "username": "root", + }) + env_id = r.json()["id"] + return c, env_id + + +@pytest.fixture +def env_client(tmp_path): + """TestClient with just a project pre-created. 
Returns client.""" + import web.api as api_module + api_module.DB_PATH = tmp_path / "env_biz007.db" + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "envproj", "name": "Env Project", "path": "/env"}) + return c + + +# --------------------------------------------------------------------------- +# AC1: Roundtrip — _encrypt_auth → _decrypt_auth returns original string +# --------------------------------------------------------------------------- + +def test_encrypt_decrypt_roundtrip_returns_original(conn): + """AC1: encrypt → decrypt returns the exact original plaintext.""" + original = "my_super_secret_password" + encrypted = models._encrypt_auth(original) + decrypted = models._decrypt_auth(encrypted) + assert decrypted == original + + +def test_encrypt_produces_different_value_than_plaintext(conn): + """AC1: encrypted value is not the original (Fernet token, not plaintext).""" + original = "plain_secret" + encrypted = models._encrypt_auth(original) + assert encrypted != original + assert not encrypted.startswith("b64:") + + +def test_encrypt_two_calls_produce_different_tokens(conn): + """AC1: Fernet uses random IV — two encryptions of same value differ but both decrypt correctly.""" + value = "same_password" + enc1 = models._encrypt_auth(value) + enc2 = models._encrypt_auth(value) + # Encrypted forms must differ due to Fernet IV randomness + assert enc1 != enc2 + # Both must decrypt to original + assert models._decrypt_auth(enc1) == value + assert models._decrypt_auth(enc2) == value + + +def test_encrypt_raises_runtime_error_when_no_key(monkeypatch): + """AC1: _encrypt_auth raises RuntimeError when KIN_SECRET_KEY is absent.""" + monkeypatch.delenv("KIN_SECRET_KEY", raising=False) + with pytest.raises(RuntimeError, match="KIN_SECRET_KEY"): + models._encrypt_auth("any_value") + + +def test_decrypt_fernet_token_without_key_returns_raw_not_plaintext(monkeypatch): + """AC1: _decrypt_auth 
without key cannot recover plaintext — returns stored token, not original.""" + original = "secret" + encrypted = models._encrypt_auth(original) + monkeypatch.delenv("KIN_SECRET_KEY", raising=False) + result = models._decrypt_auth(encrypted) + # Without the key we cannot get the plaintext back + assert result != original + + +# --------------------------------------------------------------------------- +# AC2: Migration — b64: record auto-re-encrypted on read +# --------------------------------------------------------------------------- + +def test_decrypt_auth_handles_b64_prefix_without_db(conn): + """AC2: _decrypt_auth decodes b64:-prefixed value (no DB needed for the decode itself).""" + plaintext = "legacy_password" + b64_stored = "b64:" + base64.b64encode(plaintext.encode()).decode() + decrypted = models._decrypt_auth(b64_stored) + assert decrypted == plaintext + + +def test_decrypt_auth_b64_rewrites_db_when_conn_provided(conn_with_project): + """AC2: _decrypt_auth with conn+env_id re-encrypts b64: value in DB on read.""" + conn = conn_with_project + plaintext = "legacy_pass_123" + b64_value = "b64:" + base64.b64encode(plaintext.encode()).decode() + + cur = conn.execute( + """INSERT INTO project_environments + (project_id, name, host, port, username, auth_type, auth_value, is_installed) + VALUES ('testproj', 'legacy', 'host.example.com', 22, 'root', 'password', ?, 0)""", + (b64_value,), + ) + conn.commit() + env_id = cur.lastrowid + + # Call decrypt with conn+env_id — must trigger re-encryption + decrypted = models._decrypt_auth(b64_value, conn=conn, env_id=env_id) + assert decrypted == plaintext + + # DB must now have Fernet token, not b64: + stored_after = conn.execute( + "SELECT auth_value FROM project_environments WHERE id = ?", (env_id,) + ).fetchone()["auth_value"] + assert not stored_after.startswith("b64:"), ( + "After migration, b64: prefix must be replaced by a Fernet token" + ) + # And the new token must decrypt correctly + assert 
models._decrypt_auth(stored_after) == plaintext + + +def test_get_environment_migrates_b64_and_returns_plaintext(conn_with_project): + """AC2: get_environment() transparently migrates b64: values and returns plaintext auth_value.""" + conn = conn_with_project + plaintext = "old_secret" + b64_value = "b64:" + base64.b64encode(plaintext.encode()).decode() + + cur = conn.execute( + """INSERT INTO project_environments + (project_id, name, host, port, username, auth_type, auth_value, is_installed) + VALUES ('testproj', 'legacy2', 'host2.example.com', 22, 'root', 'password', ?, 0)""", + (b64_value,), + ) + conn.commit() + env_id = cur.lastrowid + + env = models.get_environment(conn, env_id) + + assert env["auth_value"] == plaintext, ( + f"get_environment must return plaintext after b64 migration, got: {env['auth_value']!r}" + ) + + # DB must be updated: b64: replaced by Fernet token + stored_after = conn.execute( + "SELECT auth_value FROM project_environments WHERE id = ?", (env_id,) + ).fetchone()["auth_value"] + assert not stored_after.startswith("b64:"), ( + "DB must contain Fernet token after get_environment migrates b64: record" + ) + + +def test_get_environment_second_read_after_migration_still_decrypts(conn_with_project): + """AC2: After b64 migration, subsequent get_environment calls still return plaintext.""" + conn = conn_with_project + plaintext = "migrated_secret" + b64_value = "b64:" + base64.b64encode(plaintext.encode()).decode() + + cur = conn.execute( + """INSERT INTO project_environments + (project_id, name, host, port, username, auth_type, auth_value, is_installed) + VALUES ('testproj', 'legacy3', 'host3.example.com', 22, 'root', 'password', ?, 0)""", + (b64_value,), + ) + conn.commit() + env_id = cur.lastrowid + + # First read: triggers migration + env1 = models.get_environment(conn, env_id) + assert env1["auth_value"] == plaintext + + # Second read: now reads Fernet token (post-migration) + env2 = models.get_environment(conn, env_id) + assert 
env2["auth_value"] == plaintext + + +# --------------------------------------------------------------------------- +# AC3: Missing KIN_SECRET_KEY → scan endpoint returns 503 (not 500) +# --------------------------------------------------------------------------- + +def test_scan_endpoint_returns_503_when_kin_secret_key_missing(scan_client, monkeypatch): + """AC3: POST /environments/{id}/scan returns 503 when KIN_SECRET_KEY is not set.""" + client, env_id = scan_client + monkeypatch.delenv("KIN_SECRET_KEY", raising=False) + r = client.post(f"/api/projects/scanproj/environments/{env_id}/scan") + assert r.status_code == 503, ( + f"scan must return 503 when KIN_SECRET_KEY is missing, got {r.status_code}: {r.text}" + ) + + +def test_scan_endpoint_returns_503_not_500(scan_client, monkeypatch): + """AC3: HTTP 503 (misconfiguration) must be returned, not 500 (code bug).""" + client, env_id = scan_client + monkeypatch.delenv("KIN_SECRET_KEY", raising=False) + r = client.post(f"/api/projects/scanproj/environments/{env_id}/scan") + assert r.status_code != 500, "Missing KIN_SECRET_KEY must produce 503, not 500" + assert r.status_code == 503 + + +def test_scan_endpoint_returns_202_when_key_present(scan_client): + """AC3: scan endpoint returns 202 when KIN_SECRET_KEY is correctly set.""" + client, env_id = scan_client + with patch("subprocess.Popen") as mock_popen: + mock_popen.return_value = MagicMock(pid=12345) + r = client.post(f"/api/projects/scanproj/environments/{env_id}/scan") + assert r.status_code == 202 + + +# --------------------------------------------------------------------------- +# AC4: Runner path — get_environment() returns decrypted plaintext auth_value +# --------------------------------------------------------------------------- + +def test_get_environment_returns_decrypted_auth_value(conn_with_project): + """AC4: get_environment() returns plaintext, not the Fernet token stored in DB.""" + conn = conn_with_project + plaintext = "runner_secret_42" + env = 
models.create_environment( + conn, "testproj", "runner-env", "10.0.0.10", "root", + auth_value=plaintext, + ) + env_id = env["id"] + + fetched = models.get_environment(conn, env_id) + assert fetched["auth_value"] == plaintext, ( + f"get_environment must return plaintext auth_value, got: {fetched['auth_value']!r}" + ) + + +def test_get_environment_auth_value_is_not_fernet_token(conn_with_project): + """AC4: auth_value from get_environment is decrypted (not a Fernet base64 token).""" + conn = conn_with_project + plaintext = "real_password_xyz" + env = models.create_environment( + conn, "testproj", "fernet-check", "10.0.0.11", "user", + auth_value=plaintext, + ) + + # Verify DB stores encrypted (not plaintext) + raw_stored = conn.execute( + "SELECT auth_value FROM project_environments WHERE id = ?", (env["id"],) + ).fetchone()["auth_value"] + assert raw_stored != plaintext, "DB must store encrypted value, not plaintext" + + # get_environment must return decrypted plaintext + fetched = models.get_environment(conn, env["id"]) + assert fetched["auth_value"] == plaintext + + +def test_get_environment_returns_none_auth_value_when_not_set(conn_with_project): + """AC4: get_environment() returns auth_value=None when no credential was stored.""" + conn = conn_with_project + env = models.create_environment( + conn, "testproj", "no-cred", "10.0.0.12", "user", + auth_value=None, + ) + fetched = models.get_environment(conn, env["id"]) + assert fetched["auth_value"] is None + + +def test_create_environment_hides_auth_value_in_return(conn_with_project): + """AC4: create_environment() returns auth_value=None — plaintext only via get_environment.""" + conn = conn_with_project + env = models.create_environment( + conn, "testproj", "hidden-cred", "10.0.0.13", "user", + auth_value="secret", + ) + assert env["auth_value"] is None, ( + "create_environment must return auth_value=None for API safety" + ) + + +# --------------------------------------------------------------------------- +# 
AC5: Old _obfuscate_auth / _deobfuscate_auth are not present anywhere +# --------------------------------------------------------------------------- + +def test_obfuscate_auth_not_in_core_models(): + """AC5: _obfuscate_auth must not exist in core.models (fully removed).""" + import core.models as m + assert not hasattr(m, "_obfuscate_auth"), ( + "_obfuscate_auth must be removed from core.models — use _encrypt_auth instead" + ) + + +def test_deobfuscate_auth_not_in_core_models(): + """AC5: _deobfuscate_auth must not exist in core.models (fully removed).""" + import core.models as m + assert not hasattr(m, "_deobfuscate_auth"), ( + "_deobfuscate_auth must be removed from core.models — use _decrypt_auth instead" + ) + + +def test_obfuscate_auth_not_imported_in_web_api(): + """AC5: _obfuscate_auth must not be imported or defined in web.api.""" + import web.api as api_mod + assert not hasattr(api_mod, "_obfuscate_auth"), ( + "_obfuscate_auth must not appear in web.api" + ) + + +def test_deobfuscate_auth_not_imported_in_web_api(): + """AC5: _deobfuscate_auth must not be imported or defined in web.api.""" + import web.api as api_mod + assert not hasattr(api_mod, "_deobfuscate_auth"), ( + "_deobfuscate_auth must not appear in web.api" + ) + + +# --------------------------------------------------------------------------- +# AC6 (KIN-095): ModuleNotFoundError for cryptography → 503, not 500 +# --------------------------------------------------------------------------- + +def test_create_environment_returns_503_when_cryptography_not_installed(env_client): + """AC6: POST /environments returns 503 when cryptography package missing (not 500).""" + client = env_client + with patch("core.models._encrypt_auth", side_effect=ModuleNotFoundError("No module named 'cryptography'")): + r = client.post("/api/projects/envproj/environments", json={ + "name": "creds-env", "host": "10.0.0.20", "username": "root", + "auth_type": "password", "auth_value": "secret", + }) + assert r.status_code 
== 503, ( + f"create_environment must return 503 when cryptography is missing, got {r.status_code}: {r.text}" + ) + assert "cryptography" in r.json()["detail"].lower() + + +def test_create_environment_returns_503_not_500_for_missing_cryptography(env_client): + """AC6: 500 must NOT be returned when cryptography package is absent.""" + client = env_client + with patch("core.models._encrypt_auth", side_effect=ModuleNotFoundError("No module named 'cryptography'")): + r = client.post("/api/projects/envproj/environments", json={ + "name": "creds-env2", "host": "10.0.0.21", "username": "root", + "auth_value": "secret2", + }) + assert r.status_code != 500, "Missing cryptography must produce 503, not 500" + + +def test_patch_environment_returns_503_when_cryptography_not_installed(env_client): + """AC6: PATCH /environments/{id} returns 503 when cryptography package missing.""" + client = env_client + # Create env without auth_value so no encryption at create time + r = client.post("/api/projects/envproj/environments", json={ + "name": "patch-env", "host": "10.0.0.22", "username": "root", + }) + assert r.status_code == 201, f"Setup failed: {r.text}" + env_id = r.json()["id"] + + with patch("core.models._encrypt_auth", side_effect=ModuleNotFoundError("No module named 'cryptography'")): + r = client.patch(f"/api/projects/envproj/environments/{env_id}", json={ + "auth_value": "new_secret", + }) + assert r.status_code == 503, ( + f"patch_environment must return 503 when cryptography is missing, got {r.status_code}: {r.text}" + ) + assert "cryptography" in r.json()["detail"].lower() diff --git a/tests/test_kin_fix_006_regression.py b/tests/test_kin_fix_006_regression.py new file mode 100644 index 0000000..c2aca3c --- /dev/null +++ b/tests/test_kin_fix_006_regression.py @@ -0,0 +1,156 @@ +"""Regression tests for KIN-FIX-006: 'ssh_key' must be a valid auth_type. + +Root cause: VALID_AUTH_TYPES did not include 'ssh_key', causing 422 on POST credentials. 
+Fix: VALID_AUTH_TYPES = {"password", "key", "ssh_key"} (web/api.py line 1028). + +Acceptance criteria: + 1. POST /projects/{id}/environments with auth_type='ssh_key' returns 201 (not 422) + 2. auth_type='key' still returns 201 + 3. auth_type='password' still returns 201 + 4. auth_type='ftp' (invalid) returns 422 +""" +import pytest +from unittest.mock import patch, MagicMock + + +# --------------------------------------------------------------------------- +# Fixture +# --------------------------------------------------------------------------- + +@pytest.fixture +def client(tmp_path): + import web.api as api_module + api_module.DB_PATH = tmp_path / "test_fix006.db" + # Re-import app after setting DB_PATH so init_db uses the new path + from importlib import reload + import web.api + reload(web.api) + api_module.DB_PATH = tmp_path / "test_fix006.db" + from web.api import app + from fastapi.testclient import TestClient + c = TestClient(app) + c.post("/api/projects", json={"id": "testproj", "name": "Test Project", "path": "/testproj"}) + return c + + +# --------------------------------------------------------------------------- +# Tests: VALID_AUTH_TYPES validation +# --------------------------------------------------------------------------- + +def test_create_environment_ssh_key_auth_type_returns_201(client): + """Regression KIN-FIX-006: auth_type='ssh_key' must return 201, not 422.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-ssh", + "host": "10.0.0.1", + "username": "deploy", + "auth_type": "ssh_key", + "auth_value": "-----BEGIN RSA PRIVATE KEY-----", + }) + assert r.status_code == 201, ( + f"auth_type='ssh_key' must be accepted (201), got {r.status_code}: {r.text}" + ) + + +def test_create_environment_key_auth_type_still_valid(client): + """auth_type='key' must still return 201 after the fix.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-key", + "host": "10.0.0.2", + "username": "deploy", 
+ "auth_type": "key", + "auth_value": "keydata", + }) + assert r.status_code == 201, ( + f"auth_type='key' must still be valid (201), got {r.status_code}: {r.text}" + ) + + +def test_create_environment_password_auth_type_still_valid(client): + """auth_type='password' must still return 201 after the fix.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-pass", + "host": "10.0.0.3", + "username": "root", + "auth_type": "password", + "auth_value": "s3cr3t", + }) + assert r.status_code == 201, ( + f"auth_type='password' must still be valid (201), got {r.status_code}: {r.text}" + ) + + +def test_create_environment_invalid_auth_type_returns_422(client): + """Invalid auth_type (e.g. 'ftp') must return 422 Unprocessable Entity.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-ftp", + "host": "10.0.0.4", + "username": "ftpuser", + "auth_type": "ftp", + "auth_value": "password123", + }) + assert r.status_code == 422, ( + f"auth_type='ftp' must be rejected (422), got {r.status_code}: {r.text}" + ) + + +def test_create_environment_empty_auth_type_returns_422(client): + """Empty string auth_type must return 422.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-empty", + "host": "10.0.0.5", + "username": "root", + "auth_type": "", + }) + assert r.status_code == 422, ( + f"auth_type='' must be rejected (422), got {r.status_code}: {r.text}" + ) + + +def test_create_environment_default_auth_type_is_password(client): + """Default auth_type (omitted) must be 'password' and return 201.""" + r = client.post("/api/projects/testproj/environments", json={ + "name": "prod-default", + "host": "10.0.0.6", + "username": "root", + "auth_value": "pass", + # auth_type intentionally omitted — defaults to 'password' + }) + assert r.status_code == 201, ( + f"Default auth_type must be accepted (201), got {r.status_code}: {r.text}" + ) + + +# 
--------------------------------------------------------------------------- +# Test: VALID_AUTH_TYPES content (unit-level) +# --------------------------------------------------------------------------- + +def test_valid_auth_types_contains_ssh_key(): + """Unit: VALID_AUTH_TYPES set must include 'ssh_key'.""" + from web.api import VALID_AUTH_TYPES + assert "ssh_key" in VALID_AUTH_TYPES, ( + f"VALID_AUTH_TYPES must contain 'ssh_key', got: {VALID_AUTH_TYPES}" + ) + + +def test_valid_auth_types_contains_key(): + """Unit: VALID_AUTH_TYPES set must include 'key'.""" + from web.api import VALID_AUTH_TYPES + assert "key" in VALID_AUTH_TYPES, ( + f"VALID_AUTH_TYPES must contain 'key', got: {VALID_AUTH_TYPES}" + ) + + +def test_valid_auth_types_contains_password(): + """Unit: VALID_AUTH_TYPES set must include 'password'.""" + from web.api import VALID_AUTH_TYPES + assert "password" in VALID_AUTH_TYPES, ( + f"VALID_AUTH_TYPES must contain 'password', got: {VALID_AUTH_TYPES}" + ) + + +def test_valid_auth_types_excludes_ftp(): + """Unit: VALID_AUTH_TYPES must NOT include 'ftp'.""" + from web.api import VALID_AUTH_TYPES + assert "ftp" not in VALID_AUTH_TYPES, ( + f"VALID_AUTH_TYPES must not contain 'ftp', got: {VALID_AUTH_TYPES}" + ) diff --git a/tests/test_models.py b/tests/test_models.py index 9982e39..3a06e6d 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,8 +1,10 @@ """Tests for core/models.py — all functions, in-memory SQLite.""" +import re import pytest from core.db import init_db from core import models +from core.models import TASK_CATEGORIES @pytest.fixture @@ -53,6 +55,123 @@ def test_update_project_tech_stack_json(conn): assert updated["tech_stack"] == ["python", "fastapi"] +# -- project_type and SSH fields (KIN-071) -- + +def test_create_operations_project(conn): + """KIN-071: operations project stores SSH fields. 
KIN-ARCH-005: path не передаётся.""" + p = models.create_project( + conn, "srv1", "My Server", + project_type="operations", + ssh_host="10.0.0.1", + ssh_user="root", + ssh_key_path="~/.ssh/id_rsa", + ssh_proxy_jump="jumpt", + ) + assert p["project_type"] == "operations" + assert p["ssh_host"] == "10.0.0.1" + assert p["ssh_user"] == "root" + assert p["ssh_key_path"] == "~/.ssh/id_rsa" + assert p["ssh_proxy_jump"] == "jumpt" + assert p["path"] is None + + +def test_create_development_project_defaults(conn): + """KIN-071: development is default project_type.""" + p = models.create_project(conn, "devp", "Dev Project", "/path") + assert p["project_type"] == "development" + assert p["ssh_host"] is None + + +def test_update_project_ssh_fields(conn): + """KIN-071: update_project can set SSH fields.""" + models.create_project(conn, "srv2", "Server 2", project_type="operations") + updated = models.update_project(conn, "srv2", ssh_host="192.168.1.1", ssh_user="pelmen") + assert updated["ssh_host"] == "192.168.1.1" + assert updated["ssh_user"] == "pelmen" + assert updated["path"] is None + + +# --------------------------------------------------------------------------- +# KIN-ARCH-003 — path nullable для operations-проектов +# Исправляет баг: workaround с пустой строкой ("") для operations-проектов +# --------------------------------------------------------------------------- + +def test_kin_arch_003_operations_project_without_path_stores_null(conn): + """KIN-ARCH-003: operations-проект без path сохраняется с path=NULL, не пустой строкой. + + До фикса: workaround — передавать path='' чтобы обойти NOT NULL constraint. + После фикса: path=None (NULL в БД) допустим для operations-проектов. 
+ """ + p = models.create_project( + conn, "ops_null", "Ops Null Path", + project_type="operations", + ssh_host="10.0.0.1", + ) + assert p["path"] is None, ( + "KIN-ARCH-003 регрессия: path должен быть NULL, а не пустой строкой" + ) + + +def test_kin_arch_003_check_constraint_rejects_null_path_for_development(conn): + """KIN-ARCH-003: CHECK constraint (path IS NOT NULL OR project_type='operations') + отклоняет path=NULL для development-проектов.""" + import sqlite3 as _sqlite3 + with pytest.raises(_sqlite3.IntegrityError): + models.create_project( + conn, "dev_no_path", "Dev No Path", + path=None, project_type="development", + ) + + +# -- validate_completion_mode (KIN-063) -- + +def test_validate_completion_mode_valid_auto_complete(): + """validate_completion_mode принимает 'auto_complete'.""" + assert models.validate_completion_mode("auto_complete") == "auto_complete" + + +def test_validate_completion_mode_valid_review(): + """validate_completion_mode принимает 'review'.""" + assert models.validate_completion_mode("review") == "review" + + +def test_validate_completion_mode_invalid_fallback(): + """validate_completion_mode возвращает 'review' для невалидных значений (фоллбэк).""" + assert models.validate_completion_mode("auto") == "review" + assert models.validate_completion_mode("") == "review" + assert models.validate_completion_mode("unknown") == "review" + + +# -- get_effective_mode (KIN-063) -- + +def test_get_effective_mode_task_overrides_project(conn): + """Task execution_mode имеет приоритет над project execution_mode.""" + models.create_project(conn, "p1", "P1", "/p1", execution_mode="review") + models.create_task(conn, "P1-001", "p1", "Task", execution_mode="auto_complete") + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "auto_complete" + + +def test_get_effective_mode_falls_back_to_project(conn): + """Если задача без execution_mode — применяется project execution_mode.""" + models.create_project(conn, "p1", "P1", "/p1", 
execution_mode="auto_complete") + models.create_task(conn, "P1-001", "p1", "Task") # execution_mode=None + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "auto_complete" + + +def test_get_effective_mode_project_review_overrides_default(conn): + """Project execution_mode='review' + task без override → возвращает 'review'. + + Сценарий: PM хотел auto_complete, но проект настроен на review человеком. + get_effective_mode должен вернуть project-level 'review'. + """ + models.create_project(conn, "p1", "P1", "/p1", execution_mode="review") + models.create_task(conn, "P1-001", "p1", "Task") # нет task-level override + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "review" + + # -- Tasks -- def test_create_and_get_task(conn): @@ -161,6 +280,87 @@ def test_add_and_get_modules(conn): assert len(mods) == 1 +def test_add_module_created_true_for_new_module(conn): + """KIN-081: add_module возвращает _created=True для нового модуля (INSERT).""" + models.create_project(conn, "p1", "P1", "/p1") + m = models.add_module(conn, "p1", "api", "backend", "src/api/") + assert m["_created"] is True + assert m["name"] == "api" + + +def test_add_module_created_false_for_duplicate_name(conn): + """KIN-081: add_module возвращает _created=False при дублировании по имени (INSERT OR IGNORE). + + UNIQUE constraint — (project_id, name). Второй INSERT с тем же name игнорируется, + возвращается существующая запись с _created=False. 
+ """ + models.create_project(conn, "p1", "P1", "/p1") + m1 = models.add_module(conn, "p1", "api", "backend", "src/api/") + assert m1["_created"] is True + + # Same name, different path — should be ignored + m2 = models.add_module(conn, "p1", "api", "frontend", "src/api-v2/") + assert m2["_created"] is False + assert m2["name"] == "api" + # Only one module in DB + assert len(models.get_modules(conn, "p1")) == 1 + + +def test_add_module_duplicate_returns_original_row(conn): + """KIN-081: при дублировании add_module возвращает оригинальную запись (не новые данные).""" + models.create_project(conn, "p1", "P1", "/p1") + m1 = models.add_module(conn, "p1", "api", "backend", "src/api/", + description="original desc") + m2 = models.add_module(conn, "p1", "api", "frontend", "src/api-v2/", + description="new desc") + # Should return original row, not updated one + assert m2["type"] == "backend" + assert m2["description"] == "original desc" + assert m2["id"] == m1["id"] + + +def test_add_module_same_name_different_projects_are_independent(conn): + """KIN-081: два проекта могут иметь одноимённые модули — UNIQUE per project_id.""" + models.create_project(conn, "p1", "P1", "/p1") + models.create_project(conn, "p2", "P2", "/p2") + m1 = models.add_module(conn, "p1", "api", "backend", "src/api/") + m2 = models.add_module(conn, "p2", "api", "backend", "src/api/") + assert m1["_created"] is True + assert m2["_created"] is True + assert m1["id"] != m2["id"] + + +# -- delete_project -- + +def test_delete_project_removes_project_record(conn): + """KIN-081: delete_project удаляет запись из таблицы projects.""" + models.create_project(conn, "p1", "P1", "/p1") + assert models.get_project(conn, "p1") is not None + models.delete_project(conn, "p1") + assert models.get_project(conn, "p1") is None + + +def test_delete_project_cascades_to_related_tables(conn): + """KIN-081: delete_project удаляет связанные modules, decisions, tasks, agent_logs.""" + models.create_project(conn, "p1", "P1", 
"/p1") + models.add_module(conn, "p1", "api", "backend", "src/api/") + models.add_decision(conn, "p1", "gotcha", "Bug X", "desc") + models.create_task(conn, "P1-001", "p1", "Task") + models.log_agent_run(conn, "p1", "developer", "implement", task_id="P1-001") + + models.delete_project(conn, "p1") + + assert conn.execute("SELECT COUNT(*) FROM modules WHERE project_id='p1'").fetchone()[0] == 0 + assert conn.execute("SELECT COUNT(*) FROM decisions WHERE project_id='p1'").fetchone()[0] == 0 + assert conn.execute("SELECT COUNT(*) FROM tasks WHERE project_id='p1'").fetchone()[0] == 0 + assert conn.execute("SELECT COUNT(*) FROM agent_logs WHERE project_id='p1'").fetchone()[0] == 0 + + +def test_delete_project_nonexistent_does_not_raise(conn): + """KIN-081: delete_project на несуществующий проект не бросает исключение.""" + models.delete_project(conn, "nonexistent") + + # -- Agent Logs -- def test_log_agent_run(conn): @@ -238,3 +438,299 @@ def test_cost_summary(conn): def test_cost_summary_empty(conn): models.create_project(conn, "p1", "P1", "/p1") assert models.get_cost_summary(conn, days=7) == [] + + +# -- add_decision_if_new -- + +def test_add_decision_if_new_adds_new_decision(conn): + models.create_project(conn, "p1", "P1", "/p1") + d = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "description") + assert d is not None + assert d["title"] == "Use WAL mode" + assert d["type"] == "gotcha" + + +def test_add_decision_if_new_skips_exact_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "gotcha", "Use WAL mode", "desc1") + result = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "desc2") + assert result is None + # Existing decision not duplicated + assert len(models.get_decisions(conn, "p1")) == 1 + + +def test_add_decision_if_new_skips_case_insensitive_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "decision", "Use UUID for task IDs", 
"desc") + result = models.add_decision_if_new(conn, "p1", "decision", "use uuid for task ids", "other desc") + assert result is None + assert len(models.get_decisions(conn, "p1")) == 1 + + +def test_add_decision_if_new_allows_same_title_different_type(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "gotcha", "SQLite WAL", "desc") + result = models.add_decision_if_new(conn, "p1", "convention", "SQLite WAL", "other desc") + assert result is not None + assert len(models.get_decisions(conn, "p1")) == 2 + + +def test_add_decision_if_new_skips_whitespace_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "convention", "Run tests after each change", "desc") + result = models.add_decision_if_new(conn, "p1", "convention", " Run tests after each change ", "desc2") + assert result is None + assert len(models.get_decisions(conn, "p1")) == 1 + + +# -- next_task_id (KIN-OBS-009) -- + +def test_next_task_id_with_category_first(conn): + """Первая задача с category='SEC' → 'VDOL-SEC-001'.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol", category="SEC") + assert task_id == "VDOL-SEC-001" + + +def test_next_task_id_with_category_increments(conn): + """Вторая задача с category='SEC' → 'VDOL-SEC-002'.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + models.create_task(conn, "VDOL-SEC-001", "vdol", "Task 1", category="SEC") + task_id = models.next_task_id(conn, "vdol", category="SEC") + assert task_id == "VDOL-SEC-002" + + +def test_next_task_id_category_counters_independent(conn): + """Счётчики категорий независимы: SEC-002 не влияет на UI-001.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + models.create_task(conn, "VDOL-SEC-001", "vdol", "Sec Task 1", category="SEC") + models.create_task(conn, "VDOL-SEC-002", "vdol", "Sec Task 2", category="SEC") + task_id = models.next_task_id(conn, "vdol", category="UI") + assert 
task_id == "VDOL-UI-001" + + +def test_next_task_id_without_category_backward_compat(conn): + """Задача без category → 'VDOL-001' (backward compat).""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol") + assert task_id == "VDOL-001" + + +def test_next_task_id_mixed_formats_no_collision(conn): + """Смешанный проект: счётчики старого и нового форматов не пересекаются.""" + models.create_project(conn, "kin", "KIN", "/kin") + models.create_task(conn, "KIN-001", "kin", "Old style task") + models.create_task(conn, "KIN-002", "kin", "Old style task 2") + # Новый формат с категорией не мешает старому + cat_id = models.next_task_id(conn, "kin", category="OBS") + assert cat_id == "KIN-OBS-001" + # Старый формат не мешает новому + old_id = models.next_task_id(conn, "kin") + assert old_id == "KIN-003" + + +# -- Obsidian sync regex (KIN-OBS-009, решение #75) -- + +_OBSIDIAN_TASK_PATTERN = re.compile( + r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-(?:[A-Z][A-Z0-9]*-)?\d+)\s+(.+)$" +) + + +def test_obsidian_regex_matches_old_format(): + """Старый формат KIN-001 матчится.""" + m = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-001 Fix login bug") + assert m is not None + assert m.group(2) == "KIN-001" + + +def test_obsidian_regex_matches_new_format(): + """Новый формат VDOL-SEC-001 матчится.""" + m = _OBSIDIAN_TASK_PATTERN.match("- [ ] VDOL-SEC-001 Security audit") + assert m is not None + assert m.group(2) == "VDOL-SEC-001" + + +def test_obsidian_regex_matches_obs_format(): + """Формат KIN-OBS-009 матчится (проверяем задачу этой фичи).""" + m = _OBSIDIAN_TASK_PATTERN.match("* [X] KIN-OBS-009 Task ID по категориям") + assert m is not None + assert m.group(2) == "KIN-OBS-009" + + +def test_obsidian_regex_no_match_lowercase(): + """Нижний регистр не матчится.""" + assert _OBSIDIAN_TASK_PATTERN.match("- [x] proj-001 lowercase id") is None + + +def test_obsidian_regex_no_match_numeric_prefix(): + """Числовой префикс не матчится.""" + assert 
_OBSIDIAN_TASK_PATTERN.match("- [x] 123-abc invalid format") is None + + +def test_obsidian_regex_done_state(conn): + """Статус done/pending корректно извлекается.""" + m_done = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-UI-003 Done task") + m_pending = _OBSIDIAN_TASK_PATTERN.match("- [ ] KIN-UI-004 Pending task") + assert m_done.group(1) == "x" + assert m_pending.group(1) == " " + + +# -- next_task_id для всех 12 категорий (KIN-OBS-009) -- + +@pytest.mark.parametrize("cat", TASK_CATEGORIES) +def test_next_task_id_all_categories_generate_correct_format(conn, cat): + """next_task_id генерирует ID формата PROJ-CAT-001 для каждой из 12 категорий.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol", category=cat) + assert task_id == f"VDOL-{cat}-001" + + +# -- update_task category не ломает brief (KIN-OBS-009, решение #74) -- + +def test_update_task_category_preserves_brief(conn): + """update_task(category=...) не перетирает существующее поле brief.""" + models.create_project(conn, "p1", "P1", "/p1") + models.create_task(conn, "P1-001", "p1", "Task", brief={"summary": "important context"}) + updated = models.update_task(conn, "P1-001", category="SEC") + assert updated["category"] == "SEC" + assert updated["brief"] == {"summary": "important context"} + + +def test_update_task_category_preserves_status_and_priority(conn): + """update_task(category=...) 
не меняет остальные поля задачи.""" + models.create_project(conn, "p1", "P1", "/p1") + models.create_task(conn, "P1-001", "p1", "Task", status="in_progress", priority=3) + updated = models.update_task(conn, "P1-001", category="UI") + assert updated["category"] == "UI" + assert updated["status"] == "in_progress" + assert updated["priority"] == 3 + + +# -- KIN-ARCH-006: autocommit_enabled и obsidian_vault_path в SCHEMA -- + +def test_schema_project_has_autocommit_enabled_column(conn): + """KIN-ARCH-006: таблица projects содержит колонку autocommit_enabled.""" + cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()} + assert "autocommit_enabled" in cols + + +def test_schema_project_has_obsidian_vault_path_column(conn): + """KIN-ARCH-006: таблица projects содержит колонку obsidian_vault_path.""" + cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()} + assert "obsidian_vault_path" in cols + + +def test_autocommit_enabled_default_is_zero(conn): + """KIN-ARCH-006: autocommit_enabled по умолчанию равен 0.""" + models.create_project(conn, "p1", "P1", "/p1") + p = models.get_project(conn, "p1") + assert p["autocommit_enabled"] == 0 + + +def test_obsidian_vault_path_default_is_none(conn): + """KIN-ARCH-006: obsidian_vault_path по умолчанию равен NULL.""" + models.create_project(conn, "p1", "P1", "/p1") + p = models.get_project(conn, "p1") + assert p["obsidian_vault_path"] is None + + +def test_autocommit_enabled_can_be_set_to_one(conn): + """KIN-ARCH-006: autocommit_enabled можно установить в 1 через update_project.""" + models.create_project(conn, "p1", "P1", "/p1") + updated = models.update_project(conn, "p1", autocommit_enabled=1) + assert updated["autocommit_enabled"] == 1 + + +def test_obsidian_vault_path_can_be_set(conn): + """KIN-ARCH-006: obsidian_vault_path можно установить через update_project.""" + models.create_project(conn, "p1", "P1", "/p1") + updated = models.update_project(conn, "p1", 
obsidian_vault_path="/vault/my-notes") + assert updated["obsidian_vault_path"] == "/vault/my-notes" + + +# --------------------------------------------------------------------------- +# KIN-090: Task Attachments +# --------------------------------------------------------------------------- + +@pytest.fixture +def task_conn(conn): + """conn with seeded project and task for attachment tests.""" + models.create_project(conn, "prj", "Project", "/tmp/prj") + models.create_task(conn, "PRJ-001", "prj", "Fix bug") + return conn + + +def test_create_attachment_returns_dict(task_conn): + """KIN-090: create_attachment возвращает dict со всеми полями.""" + att = models.create_attachment( + task_conn, "PRJ-001", "screenshot.png", + "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png", + "image/png", 1024, + ) + assert att["id"] is not None + assert att["task_id"] == "PRJ-001" + assert att["filename"] == "screenshot.png" + assert att["path"] == "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png" + assert att["mime_type"] == "image/png" + assert att["size"] == 1024 + assert att["created_at"] is not None + + +def test_create_attachment_persists_in_sqlite(task_conn): + """KIN-090: AC4 — данные вложения персистируются в SQLite.""" + att = models.create_attachment( + task_conn, "PRJ-001", "bug.png", + "/tmp/prj/.kin/attachments/PRJ-001/bug.png", + "image/png", 512, + ) + fetched = models.get_attachment(task_conn, att["id"]) + assert fetched is not None + assert fetched["filename"] == "bug.png" + assert fetched["size"] == 512 + + +def test_list_attachments_empty_for_new_task(task_conn): + """KIN-090: list_attachments возвращает [] для задачи без вложений.""" + result = models.list_attachments(task_conn, "PRJ-001") + assert result == [] + + +def test_list_attachments_returns_all_for_task(task_conn): + """KIN-090: list_attachments возвращает все вложения задачи.""" + models.create_attachment(task_conn, "PRJ-001", "a.png", + "/tmp/prj/.kin/attachments/PRJ-001/a.png", "image/png", 100) + 
models.create_attachment(task_conn, "PRJ-001", "b.jpg", + "/tmp/prj/.kin/attachments/PRJ-001/b.jpg", "image/jpeg", 200) + result = models.list_attachments(task_conn, "PRJ-001") + assert len(result) == 2 + filenames = {a["filename"] for a in result} + assert filenames == {"a.png", "b.jpg"} + + +def test_list_attachments_isolated_by_task(task_conn): + """KIN-090: list_attachments не возвращает вложения других задач.""" + models.create_task(task_conn, "PRJ-002", "prj", "Other task") + models.create_attachment(task_conn, "PRJ-001", "a.png", + "/tmp/.kin/PRJ-001/a.png", "image/png", 100) + models.create_attachment(task_conn, "PRJ-002", "b.png", + "/tmp/.kin/PRJ-002/b.png", "image/png", 100) + assert len(models.list_attachments(task_conn, "PRJ-001")) == 1 + assert len(models.list_attachments(task_conn, "PRJ-002")) == 1 + + +def test_get_attachment_not_found_returns_none(task_conn): + """KIN-090: get_attachment возвращает None если вложение не найдено.""" + assert models.get_attachment(task_conn, 99999) is None + + +def test_delete_attachment_returns_true(task_conn): + """KIN-090: delete_attachment возвращает True при успешном удалении.""" + att = models.create_attachment(task_conn, "PRJ-001", "del.png", + "/tmp/del.png", "image/png", 50) + assert models.delete_attachment(task_conn, att["id"]) is True + assert models.get_attachment(task_conn, att["id"]) is None + + +def test_delete_attachment_not_found_returns_false(task_conn): + """KIN-090: delete_attachment возвращает False если запись не найдена.""" + assert models.delete_attachment(task_conn, 99999) is False diff --git a/tests/test_obsidian_sync.py b/tests/test_obsidian_sync.py new file mode 100644 index 0000000..838c5a2 --- /dev/null +++ b/tests/test_obsidian_sync.py @@ -0,0 +1,307 @@ +"""Tests for core/obsidian_sync.py — KIN-013.""" + +import sqlite3 +import tempfile +from pathlib import Path + +import pytest + +from core.db import init_db +from core.obsidian_sync import ( + export_decisions_to_md, + 
parse_task_checkboxes, + sync_obsidian, +) +from core import models + + +# --------------------------------------------------------------------------- +# 0. Migration — obsidian_vault_path column must exist after init_db +# --------------------------------------------------------------------------- + +def test_migration_obsidian_vault_path_column_exists(): + """init_db создаёт или мигрирует колонку obsidian_vault_path в таблице projects.""" + conn = init_db(db_path=":memory:") + cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()} + conn.close() + assert "obsidian_vault_path" in cols + + +@pytest.fixture +def tmp_vault(tmp_path): + """Returns a temporary vault root directory.""" + return tmp_path / "vault" + + +@pytest.fixture +def db(tmp_path): + """Returns an in-memory SQLite connection with schema + test data.""" + db_path = tmp_path / "test.db" + conn = init_db(db_path) + models.create_project(conn, "proj1", "Test Project", "/tmp/proj1") + yield conn + conn.close() + + +# --------------------------------------------------------------------------- +# 1. 
export creates files with correct frontmatter +# --------------------------------------------------------------------------- + +def test_export_decisions_creates_md_files(tmp_vault): + decisions = [ + { + "id": 42, + "project_id": "proj1", + "type": "gotcha", + "category": "testing", + "title": "Proxy через SSH не работает без ssh-agent", + "description": "При подключении через ProxyJump ssh-agent должен быть запущен.", + "tags": ["testing", "mock", "subprocess"], + "created_at": "2026-03-10T12:00:00", + } + ] + tmp_vault.mkdir(parents=True) + created = export_decisions_to_md("proj1", decisions, tmp_vault) + + assert len(created) == 1 + md_file = created[0] + assert md_file.exists() + + content = md_file.read_text(encoding="utf-8") + assert "kin_decision_id: 42" in content + assert "project: proj1" in content + assert "type: gotcha" in content + assert "category: testing" in content + assert "2026-03-10" in content + assert "# Proxy через SSH не работает без ssh-agent" in content + assert "При подключении через ProxyJump" in content + + +# --------------------------------------------------------------------------- +# 2. export is idempotent (overwrite, not duplicate) +# --------------------------------------------------------------------------- + +def test_export_idempotent(tmp_vault): + decisions = [ + { + "id": 1, + "project_id": "p", + "type": "decision", + "category": None, + "title": "Use SQLite", + "description": "SQLite is the source of truth.", + "tags": [], + "created_at": "2026-01-01", + } + ] + tmp_vault.mkdir(parents=True) + + export_decisions_to_md("p", decisions, tmp_vault) + export_decisions_to_md("p", decisions, tmp_vault) + + out_dir = tmp_vault / "p" / "decisions" + files = list(out_dir.glob("*.md")) + assert len(files) == 1 + + +# --------------------------------------------------------------------------- +# 3. 
parse_task_checkboxes — done checkbox +# --------------------------------------------------------------------------- + +def test_parse_task_checkboxes_done(tmp_vault): + tasks_dir = tmp_vault / "proj1" / "tasks" + tasks_dir.mkdir(parents=True) + (tasks_dir / "kanban.md").write_text( + "- [x] KIN-001 Implement login\n- [ ] KIN-002 Add tests\n", + encoding="utf-8", + ) + + results = parse_task_checkboxes(tmp_vault, "proj1") + done_items = [r for r in results if r["task_id"] == "KIN-001"] + assert len(done_items) == 1 + assert done_items[0]["done"] is True + assert done_items[0]["title"] == "Implement login" + + +# --------------------------------------------------------------------------- +# 4. parse_task_checkboxes — pending checkbox +# --------------------------------------------------------------------------- + +def test_parse_task_checkboxes_pending(tmp_vault): + tasks_dir = tmp_vault / "proj1" / "tasks" + tasks_dir.mkdir(parents=True) + (tasks_dir / "kanban.md").write_text( + "- [ ] KIN-002 Add tests\n", + encoding="utf-8", + ) + + results = parse_task_checkboxes(tmp_vault, "proj1") + pending = [r for r in results if r["task_id"] == "KIN-002"] + assert len(pending) == 1 + assert pending[0]["done"] is False + + +# --------------------------------------------------------------------------- +# 5. 
parse_task_checkboxes — lines without task ID are skipped +# --------------------------------------------------------------------------- + +def test_parse_task_checkboxes_no_id(tmp_vault): + tasks_dir = tmp_vault / "proj1" / "tasks" + tasks_dir.mkdir(parents=True) + (tasks_dir / "notes.md").write_text( + "- [x] Some task without ID\n" + "- [ ] Another line without identifier\n" + "- [x] KIN-003 With ID\n", + encoding="utf-8", + ) + + results = parse_task_checkboxes(tmp_vault, "proj1") + assert all(r["task_id"].startswith("KIN-") for r in results) + assert len(results) == 1 + assert results[0]["task_id"] == "KIN-003" + + +# --------------------------------------------------------------------------- +# 6. sync_obsidian updates task status when done=True +# --------------------------------------------------------------------------- + +def test_sync_updates_task_status(db, tmp_vault): + tmp_vault.mkdir(parents=True) + models.update_project(db, "proj1", obsidian_vault_path=str(tmp_vault)) + + task = models.create_task(db, "PROJ1-001", "proj1", "Do something", status="in_progress") + assert task["status"] == "in_progress" + + # Write checkbox file + tasks_dir = tmp_vault / "proj1" / "tasks" + tasks_dir.mkdir(parents=True) + (tasks_dir / "sprint.md").write_text( + "- [x] PROJ1-001 Do something\n", + encoding="utf-8", + ) + + result = sync_obsidian(db, "proj1") + + assert result["tasks_updated"] == 1 + assert not result["errors"] + updated = models.get_task(db, "PROJ1-001") + assert updated["status"] == "done" + + +# --------------------------------------------------------------------------- +# 7. 
sync_obsidian raises ValueError when vault_path not set +# --------------------------------------------------------------------------- + +def test_sync_no_vault_path(db): + # project exists but obsidian_vault_path is NULL + with pytest.raises(ValueError, match="obsidian_vault_path not set"): + sync_obsidian(db, "proj1") + + +# --------------------------------------------------------------------------- +# 8. export — frontmatter обёрнут в разделители --- +# --------------------------------------------------------------------------- + +def test_export_frontmatter_has_yaml_delimiters(tmp_vault): + """Экспортированный файл начинается с '---' и содержит закрывающий '---'.""" + decisions = [ + { + "id": 99, + "project_id": "p", + "type": "decision", + "category": None, + "title": "YAML Delimiter Test", + "description": "Verifying frontmatter delimiters.", + "tags": [], + "created_at": "2026-01-01", + } + ] + tmp_vault.mkdir(parents=True) + created = export_decisions_to_md("p", decisions, tmp_vault) + content = created[0].read_text(encoding="utf-8") + + assert content.startswith("---\n"), "Frontmatter должен начинаться с '---\\n'" + # первые --- открывают, вторые --- закрывают frontmatter + parts = content.split("---\n") + assert len(parts) >= 3, "Должно быть минимум два разделителя '---'" + + +# --------------------------------------------------------------------------- +# 9. sync_obsidian — несуществующий vault_path → директория создаётся автоматически +# KIN-070: Регрессионный тест на автоматическое создание vault directory +# --------------------------------------------------------------------------- + +def test_kin070_sync_creates_missing_vault_directory(db, tmp_path): + """KIN-070: Если vault_path не существует, sync автоматически создаёт директорию. 
+ + Проверяет что: + - Директория создаётся без ошибок + - sync_obsidian не падает с ошибкой + - Возвращаемый результат содержит errors=[] + """ + nonexistent = tmp_path / "ghost_vault" + models.update_project(db, "proj1", obsidian_vault_path=str(nonexistent)) + + result = sync_obsidian(db, "proj1") + + assert result["errors"] == [] + assert nonexistent.is_dir() # директория автоматически создана + assert result["exported_decisions"] == 0 # нет decisions в DB + assert result["tasks_updated"] == 0 + + +# --------------------------------------------------------------------------- +# 10. sync_obsidian + decisions: несуществующий vault + decisions в БД → export success +# KIN-070: Проверяет что decisions экспортируются когда vault создаётся автоматически +# --------------------------------------------------------------------------- + +def test_kin070_sync_creates_vault_and_exports_decisions(db, tmp_path): + """KIN-070: sync экспортирует decisions и автоматически создаёт vault_path. + + Проверяет что: + - vault директория создаётся автоматически + - decisions экспортируются в .md-файлы (exported_decisions > 0) + - errors == [] (нет ошибок) + """ + nonexistent = tmp_path / "missing_vault" + models.update_project(db, "proj1", obsidian_vault_path=str(nonexistent)) + + # Создаём decision в БД + models.add_decision( + db, + project_id="proj1", + type="decision", + title="Use SQLite for sync state", + description="SQLite will be the single source of truth.", + tags=["database", "sync"], + ) + + result = sync_obsidian(db, "proj1") + + # Проверяем успешный экспорт + assert result["errors"] == [] + assert nonexistent.is_dir() # директория создана + assert result["exported_decisions"] == 1 # одно decision экспортировано + assert result["tasks_updated"] == 0 + + # Проверяем что .md-файл создан в правильной директории + decisions_dir = nonexistent / "proj1" / "decisions" + assert decisions_dir.is_dir() + md_files = list(decisions_dir.glob("*.md")) + assert len(md_files) == 1 + + +# 
--------------------------------------------------------------------------- +# 11. sync_obsidian — пустой vault → 0 экспортов, 0 обновлений, нет ошибок +# --------------------------------------------------------------------------- + +def test_sync_empty_vault_no_errors(db, tmp_vault): + """Пустой vault (нет decisions, нет task-файлов) → exported=0, updated=0, errors=[].""" + tmp_vault.mkdir(parents=True) + models.update_project(db, "proj1", obsidian_vault_path=str(tmp_vault)) + + result = sync_obsidian(db, "proj1") + + assert result["exported_decisions"] == 0 + assert result["tasks_updated"] == 0 + assert result["errors"] == [] diff --git a/tests/test_phases.py b/tests/test_phases.py new file mode 100644 index 0000000..84ab948 --- /dev/null +++ b/tests/test_phases.py @@ -0,0 +1,369 @@ +"""Tests for core/phases.py — Research Phase Pipeline (KIN-059). + +Covers: + - validate_roles: фильтрация, дедубликация, удаление architect + - build_phase_order: канонический порядок + auto-architect + - create_project_with_phases: создание + первая фаза active + - approve_phase: переход статусов, активация следующей, sequential enforcement + - reject_phase: статус rejected, защита от неактивных фаз + - revise_phase: цикл revise→running, счётчик, сохранение комментария +""" + +import pytest +from core.db import init_db +from core import models +from core.phases import ( + RESEARCH_ROLES, + approve_phase, + build_phase_order, + create_project_with_phases, + reject_phase, + revise_phase, + validate_roles, +) + + +@pytest.fixture +def conn(): + """KIN-059: изолированная in-memory БД для каждого теста.""" + c = init_db(db_path=":memory:") + yield c + c.close() + + +# --------------------------------------------------------------------------- +# validate_roles +# --------------------------------------------------------------------------- + + +def test_validate_roles_filters_unknown_roles(): + """KIN-059: неизвестные роли отфильтровываются из списка.""" + result = 
validate_roles(["business_analyst", "wizard", "ghost"]) + assert result == ["business_analyst"] + + +def test_validate_roles_strips_architect(): + """KIN-059: architect убирается из входных ролей — добавляется автоматически позже.""" + result = validate_roles(["architect", "tech_researcher"]) + assert "architect" not in result + assert "tech_researcher" in result + + +def test_validate_roles_deduplicates(): + """KIN-059: дублирующиеся роли удаляются, остаётся одна копия.""" + result = validate_roles(["business_analyst", "business_analyst", "tech_researcher"]) + assert result.count("business_analyst") == 1 + + +def test_validate_roles_empty_input_returns_empty(): + """KIN-059: пустой список ролей → пустой результат.""" + assert validate_roles([]) == [] + + +def test_validate_roles_only_architect_returns_empty(): + """KIN-059: только architect во входе → пустой результат (architect не researcher).""" + assert validate_roles(["architect"]) == [] + + +def test_validate_roles_strips_and_lowercases(): + """KIN-059: роли нормализуются: trim + lowercase.""" + result = validate_roles([" Tech_Researcher ", "MARKETER"]) + assert "tech_researcher" in result + assert "marketer" in result + + +# --------------------------------------------------------------------------- +# build_phase_order +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize("roles,expected", [ + ( + ["business_analyst"], + ["business_analyst", "architect"], + ), + ( + ["tech_researcher"], + ["tech_researcher", "architect"], + ), + ( + ["marketer", "business_analyst"], + ["business_analyst", "marketer", "architect"], + ), + ( + ["ux_designer", "market_researcher", "tech_researcher"], + ["market_researcher", "tech_researcher", "ux_designer", "architect"], + ), +]) +def test_build_phase_order_canonical_order_and_appends_architect(roles, expected): + """KIN-059: роли сортируются в канонический порядок, architect добавляется последним.""" + assert 
build_phase_order(roles) == expected + + +def test_build_phase_order_no_architect_if_no_researcher(): + """KIN-059: architect не добавляется если нет ни одного researcher.""" + result = build_phase_order([]) + assert result == [] + assert "architect" not in result + + +def test_build_phase_order_architect_always_last(): + """KIN-059: architect всегда последний независимо от набора ролей.""" + result = build_phase_order(["marketer", "legal_researcher", "business_analyst"]) + assert result[-1] == "architect" + + +# --------------------------------------------------------------------------- +# create_project_with_phases +# --------------------------------------------------------------------------- + + +def test_create_project_with_phases_creates_project_and_phases(conn): + """KIN-059: создание проекта с researcher-ролями создаёт и проект, и записи фаз.""" + result = create_project_with_phases( + conn, "proj1", "Project 1", "/path", + description="Тестовый проект", selected_roles=["business_analyst"], + ) + assert result["project"]["id"] == "proj1" + # business_analyst + architect = 2 фазы + assert len(result["phases"]) == 2 + + +def test_create_project_with_phases_first_phase_is_active(conn): + """KIN-059: первая фаза сразу переходит в status=active и получает task_id.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["tech_researcher"], + ) + first = result["phases"][0] + assert first["status"] == "active" + assert first["task_id"] is not None + + +def test_create_project_with_phases_other_phases_remain_pending(conn): + """KIN-059: все фазы кроме первой остаются pending — не активируются без approve.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["market_researcher", "tech_researcher"], + ) + # market_researcher, tech_researcher, architect → 3 фазы + for phase in result["phases"][1:]: + assert phase["status"] == "pending" + + +def 
test_create_project_with_phases_raises_if_no_roles(conn): + """KIN-059: ValueError при попытке создать проект без researcher-ролей.""" + with pytest.raises(ValueError, match="[Aa]t least one research role"): + create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=[], + ) + + +def test_create_project_with_phases_architect_auto_added_last(conn): + """KIN-059: architect автоматически добавляется последним без явного указания.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + roles = [ph["role"] for ph in result["phases"]] + assert "architect" in roles + assert roles[-1] == "architect" + + +@pytest.mark.parametrize("roles", [ + ["business_analyst"], + ["market_researcher", "tech_researcher"], + ["legal_researcher", "ux_designer", "marketer"], + ["business_analyst", "market_researcher", "legal_researcher", + "tech_researcher", "ux_designer", "marketer"], +]) +def test_create_project_with_phases_architect_added_for_any_combination(conn, roles): + """KIN-059: architect добавляется при любом наборе researcher-ролей.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=roles, + ) + phase_roles = [ph["role"] for ph in result["phases"]] + assert "architect" in phase_roles + assert phase_roles[-1] == "architect" + + +# --------------------------------------------------------------------------- +# approve_phase +# --------------------------------------------------------------------------- + + +def test_approve_phase_sets_status_approved(conn): + """KIN-059: approve_phase устанавливает status=approved для текущей фазы.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + phase_id = result["phases"][0]["id"] + out = approve_phase(conn, phase_id) + assert out["phase"]["status"] == "approved" + + +def 
test_approve_phase_activates_next_phase(conn): + """KIN-059: следующая фаза активируется только после approve предыдущей.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + first_phase_id = result["phases"][0]["id"] + out = approve_phase(conn, first_phase_id) + next_phase = out["next_phase"] + assert next_phase is not None + assert next_phase["status"] == "active" + assert next_phase["role"] == "architect" + + +def test_approve_phase_last_returns_no_next(conn): + """KIN-059: approve последней фазы возвращает next_phase=None (workflow завершён).""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + # Approve business_analyst → architect активируется + first_id = result["phases"][0]["id"] + mid = approve_phase(conn, first_id) + architect_id = mid["next_phase"]["id"] + # Approve architect → no next + final = approve_phase(conn, architect_id) + assert final["next_phase"] is None + + +def test_approve_phase_not_active_raises(conn): + """KIN-059: approve фазы в статусе != active бросает ValueError.""" + models.create_project(conn, "proj1", "P1", "/path", description="Desc") + phase = models.create_phase(conn, "proj1", "business_analyst", 0) + # Фаза в статусе pending, не active + with pytest.raises(ValueError, match="not active"): + approve_phase(conn, phase["id"]) + + +def test_pending_phase_not_started_without_approve(conn): + """KIN-059: следующая фаза не стартует без approve предыдущей (нет автоактивации).""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["market_researcher", "tech_researcher"], + ) + # Вторая фаза (tech_researcher) должна оставаться pending + second_phase = result["phases"][1] + assert second_phase["status"] == "pending" + assert second_phase["task_id"] is None + + +# 
--------------------------------------------------------------------------- +# reject_phase +# --------------------------------------------------------------------------- + + +def test_reject_phase_sets_status_rejected(conn): + """KIN-059: reject_phase устанавливает status=rejected для фазы.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["tech_researcher"], + ) + phase_id = result["phases"][0]["id"] + out = reject_phase(conn, phase_id, reason="Не релевантно") + assert out["status"] == "rejected" + + +def test_reject_phase_not_active_raises(conn): + """KIN-059: reject_phase для pending-фазы бросает ValueError.""" + models.create_project(conn, "proj1", "P1", "/path", description="Desc") + phase = models.create_phase(conn, "proj1", "tech_researcher", 0) + with pytest.raises(ValueError, match="not active"): + reject_phase(conn, phase["id"], reason="test") + + +# --------------------------------------------------------------------------- +# revise_phase +# --------------------------------------------------------------------------- + + +def test_revise_phase_sets_status_revising(conn): + """KIN-059: revise_phase устанавливает статус revising для фазы.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["ux_designer"], + ) + phase_id = result["phases"][0]["id"] + out = revise_phase(conn, phase_id, comment="Нужно больше деталей") + assert out["phase"]["status"] == "revising" + + +def test_revise_phase_creates_new_task_with_comment(conn): + """KIN-059: revise_phase создаёт новую задачу с revise_comment в brief.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["marketer"], + ) + phase_id = result["phases"][0]["id"] + comment = "Добавь анализ конкурентов" + out = revise_phase(conn, phase_id, comment=comment) + new_task = out["new_task"] + assert new_task is not None + assert 
new_task["brief"]["revise_comment"] == comment + + +def test_revise_phase_increments_revise_count(conn): + """KIN-059: revise_phase увеличивает счётчик revise_count с каждым вызовом.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["marketer"], + ) + phase_id = result["phases"][0]["id"] + out1 = revise_phase(conn, phase_id, comment="Первая ревизия") + assert out1["phase"]["revise_count"] == 1 + out2 = revise_phase(conn, phase_id, comment="Вторая ревизия") + assert out2["phase"]["revise_count"] == 2 + + +def test_revise_phase_saves_comment_on_phase(conn): + """KIN-059: revise_phase сохраняет комментарий в поле revise_comment фазы.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + phase_id = result["phases"][0]["id"] + comment = "Уточни целевую аудиторию" + out = revise_phase(conn, phase_id, comment=comment) + assert out["phase"]["revise_comment"] == comment + + +def test_revise_phase_pending_raises(conn): + """KIN-059: revise_phase для pending-фазы бросает ValueError.""" + models.create_project(conn, "proj1", "P1", "/path", description="Desc") + phase = models.create_phase(conn, "proj1", "marketer", 0) + with pytest.raises(ValueError, match="cannot be revised"): + revise_phase(conn, phase["id"], comment="test") + + +def test_revise_phase_revising_status_allows_another_revise(conn): + """KIN-059: фаза в статусе revising допускает повторный вызов revise (цикл).""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["business_analyst"], + ) + phase_id = result["phases"][0]["id"] + revise_phase(conn, phase_id, comment="Первая ревизия") + # Фаза теперь revising — повторный revise должен проходить + out = revise_phase(conn, phase_id, comment="Вторая ревизия") + assert out["phase"]["revise_count"] == 2 + + +def test_revise_phase_updates_task_id_to_new_task(conn): + 
"""KIN-059: после revise phase.task_id указывает на новую задачу.""" + result = create_project_with_phases( + conn, "proj1", "P1", "/path", + description="Desc", selected_roles=["market_researcher"], + ) + phase = result["phases"][0] + original_task_id = phase["task_id"] + out = revise_phase(conn, phase["id"], comment="Пересмотреть") + new_task_id = out["phase"]["task_id"] + assert new_task_id != original_task_id + assert new_task_id == out["new_task"]["id"] diff --git a/tests/test_runner.py b/tests/test_runner.py index bd7ac9b..ad6e5eb 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -6,7 +6,12 @@ import pytest from unittest.mock import patch, MagicMock from core.db import init_db from core import models -from agents.runner import run_agent, run_pipeline, run_audit, _try_parse_json +from agents.runner import ( + run_agent, run_pipeline, run_audit, _try_parse_json, _run_learning_extraction, + _build_claude_env, _resolve_claude_cmd, _EXTRA_PATH_DIRS, _run_autocommit, + _parse_agent_blocked, _get_changed_files, _save_sysadmin_output, + check_claude_auth, ClaudeAuthError, +) @pytest.fixture @@ -155,8 +160,9 @@ class TestRunAgent: # --------------------------------------------------------------------------- class TestRunPipeline: + @patch("agents.runner._run_autocommit") # gotcha #41: мокируем в тестах не о autocommit @patch("agents.runner.subprocess.run") - def test_successful_pipeline(self, mock_run, conn): + def test_successful_pipeline(self, mock_run, mock_autocommit, conn): mock_run.return_value = _mock_claude_success({"result": "done"}) steps = [ @@ -298,13 +304,13 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_generates_followups(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode должен вызывать generate_followups после task_auto_approved.""" + """Auto_complete mode должен вызывать generate_followups (последний шаг — tester).""" mock_run.return_value = 
_mock_claude_success({"result": "done"}) mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -334,15 +340,15 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_skips_followups_for_followup_tasks(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии).""" + """Auto_complete mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"}) - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -352,13 +358,13 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_fires_task_done_event(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode должен вызывать run_hooks с event='task_done' после task_auto_approved.""" + """Auto_complete mode должен вызывать run_hooks с event='task_done' (последний шаг — tester).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] 
mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -371,7 +377,7 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_resolves_pending_actions(self, mock_run, mock_hooks, mock_followup, mock_resolve, conn): - """Auto mode должен авто-резолвить pending_actions из followup generation.""" + """Auto_complete mode должен авто-резолвить pending_actions (последний шаг — tester).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] @@ -380,23 +386,140 @@ class TestAutoMode: mock_followup.return_value = {"created": [], "pending_actions": pending} mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True mock_resolve.assert_called_once_with(conn, "VDOL-001", pending) +# --------------------------------------------------------------------------- +# KIN-080: Guard — не перезаписывать статус, если пользователь изменил вручную +# --------------------------------------------------------------------------- + +class TestPipelineStatusGuard: + """Тесты guard-check: pipeline не должен перезаписывать статус задачи, + если пользователь вручную изменил его на 'done' или 'cancelled' пока + pipeline выполнялся.""" + + @patch("agents.runner._run_autocommit") + 
@patch("agents.runner._run_learning_extraction") + @patch("agents.runner._get_changed_files") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_pipeline_preserves_done_status_set_during_execution( + self, mock_run, mock_hooks, mock_get_files, mock_learn, mock_autocommit, conn + ): + """Guard: если пользователь вручную поставил 'done' пока pipeline работал — + итоговый статус должен остаться 'done', а не перезаписаться в 'review'.""" + def side_effect(*args, **kwargs): + # Имитируем ручную смену статуса во время выполнения агента + models.update_task(conn, "VDOL-001", status="done") + return _mock_claude_success({"result": "done"}) + + mock_run.side_effect = side_effect + mock_hooks.return_value = [] + mock_get_files.return_value = [] + mock_learn.return_value = {"added": 0, "skipped": 0} + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" # guard НЕ перезаписал в "review" + + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner._get_changed_files") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_pipeline_preserves_cancelled_status_set_during_execution( + self, mock_run, mock_hooks, mock_get_files, mock_learn, mock_autocommit, conn + ): + """Guard: если пользователь вручную поставил 'cancelled' пока pipeline работал — + итоговый статус должен остаться 'cancelled'.""" + def side_effect(*args, **kwargs): + models.update_task(conn, "VDOL-001", status="cancelled") + return _mock_claude_success({"result": "done"}) + + mock_run.side_effect = side_effect + mock_hooks.return_value = [] + mock_get_files.return_value = [] + mock_learn.return_value = {"added": 0, "skipped": 0} + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + 
assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "cancelled" # guard НЕ перезаписал в "review" + + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner._get_changed_files") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_pipeline_sets_review_when_no_manual_override( + self, mock_run, mock_hooks, mock_get_files, mock_learn, mock_autocommit, conn + ): + """Нормальный случай: задача в in_progress, пользователь не трогал статус — + после pipeline устанавливается 'review'.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_get_files.return_value = [] + mock_learn.return_value = {"added": 0, "skipped": 0} + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner._get_changed_files") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_preserves_done_status_set_during_execution( + self, mock_run, mock_hooks, mock_followup, mock_get_files, mock_learn, mock_autocommit, conn + ): + """Guard в auto_complete mode: если пользователь вручную поставил 'done' + пока pipeline работал — guard пропускает обновление (уже done).""" + def side_effect(*args, **kwargs): + models.update_task(conn, "VDOL-001", status="done") + return _mock_claude_success({"result": "done"}) + + mock_run.side_effect = side_effect + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + mock_get_files.return_value = [] + mock_learn.return_value = {"added": 0, "skipped": 0} + + 
models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "tester", "brief": "verify"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + # --------------------------------------------------------------------------- # Retry on permission error # --------------------------------------------------------------------------- class TestRetryOnPermissionError: + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner._get_changed_files") # KIN-003: prevents git subprocess calls @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, conn): + def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, mock_get_files, mock_learn, mock_autocommit, conn): """Auto mode: retry при permission error должен срабатывать.""" permission_fail = _mock_claude_failure("permission denied: cannot write file") retry_success = _mock_claude_success({"result": "fixed"}) @@ -404,8 +527,10 @@ class TestRetryOnPermissionError: mock_run.side_effect = [permission_fail, retry_success] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} + mock_get_files.return_value = [] - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "find"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -472,12 +597,13 @@ class TestNonInteractive: call_kwargs = mock_run.call_args[1] assert call_kwargs.get("stdin") == subprocess.DEVNULL + @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": ""}, clear=False) 
@patch("agents.runner.subprocess.run") - def test_noninteractive_uses_300s_timeout(self, mock_run, conn): + def test_noninteractive_uses_600s_timeout(self, mock_run, conn): mock_run.return_value = _mock_claude_success({"result": "ok"}) run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True) call_kwargs = mock_run.call_args[1] - assert call_kwargs.get("timeout") == 300 + assert call_kwargs.get("timeout") == 600 @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""}) @patch("agents.runner.subprocess.run") @@ -504,7 +630,16 @@ class TestNonInteractive: run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) call_kwargs = mock_run.call_args[1] assert call_kwargs.get("stdin") == subprocess.DEVNULL - assert call_kwargs.get("timeout") == 300 + assert call_kwargs.get("timeout") == 600 + + @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": "900"}) + @patch("agents.runner.subprocess.run") + def test_custom_timeout_via_env_var(self, mock_run, conn): + """KIN_AGENT_TIMEOUT overrides the default 600s timeout.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 900 @patch("agents.runner.subprocess.run") def test_allow_write_adds_skip_permissions(self, mock_run, conn): @@ -751,3 +886,1623 @@ class TestSilentFailedDiagnostics: assert result["success"] is True assert result.get("error") is None + + +# --------------------------------------------------------------------------- +# Auto-learning: _run_learning_extraction +# --------------------------------------------------------------------------- + +class TestRunLearningExtraction: + @patch("agents.runner.subprocess.run") + def test_extracts_and_saves_decisions(self, mock_run, conn): + """Успешный сценарий: learner возвращает JSON с decisions, они сохраняются в БД.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "SQLite WAL mode needed", 
"description": "Without WAL concurrent reads fail", "tags": ["sqlite", "db"]}, + {"type": "convention", "title": "Always run tests after change", "description": "Prevents regressions", "tags": ["testing"]}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [ + {"role": "debugger", "raw_output": "Found issue with sqlite concurrent access"}, + ] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 2 + assert result["skipped"] == 0 + + decisions = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall() + assert len(decisions) == 2 + titles = {d["title"] for d in decisions} + assert "SQLite WAL mode needed" in titles + assert "Always run tests after change" in titles + + @patch("agents.runner.subprocess.run") + def test_skips_duplicate_decisions(self, mock_run, conn): + """Дедупликация: если decision с таким title+type уже есть, пропускается.""" + from core import models as m + m.add_decision(conn, "vdol", "gotcha", "SQLite WAL mode needed", "existing desc") + + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "SQLite WAL mode needed", "description": "duplicate", "tags": []}, + {"type": "convention", "title": "New convention here", "description": "new desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "tester", "raw_output": "test output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 1 + assert result["skipped"] == 1 + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 2 + + @patch("agents.runner.subprocess.run") + def test_limits_to_5_decisions(self, mock_run, conn): + """Learner не должен сохранять более 5 decisions даже если агент вернул больше.""" + decisions_list = [ + {"type": "decision", "title": f"Decision {i}", "description": 
f"desc {i}", "tags": []} + for i in range(8) + ] + learner_output = json.dumps({"decisions": decisions_list}) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "architect", "raw_output": "long output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 5 + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 5 + + @patch("agents.runner.subprocess.run") + def test_non_json_output_returns_error(self, mock_run, conn): + """Если learner вернул не-JSON, функция возвращает error, не бросает исключение.""" + mock_run.return_value = _mock_claude_success({"result": "plain text, not json"}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") + def test_decisions_linked_to_task(self, mock_run, conn): + """Сохранённые decisions должны быть привязаны к task_id.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Important gotcha", "description": "desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + d = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchone() + assert d["task_id"] == "VDOL-001" + + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner.subprocess.run") + def test_pipeline_triggers_learning_after_completion(self, mock_run, mock_learn, conn): + """run_pipeline должен вызывать _run_learning_extraction после успешного завершения.""" + mock_run.return_value = _mock_claude_success({"result": 
"done"}) + mock_learn.return_value = {"added": 1, "skipped": 0} + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_learn.assert_called_once() + call_args = mock_learn.call_args[0] + assert call_args[1] == "VDOL-001" # task_id + assert call_args[2] == "vdol" # project_id + + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner.subprocess.run") + def test_learning_error_does_not_break_pipeline(self, mock_run, mock_learn, conn): + """Если _run_learning_extraction бросает исключение, pipeline не падает.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_learn.side_effect = Exception("learning failed") + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + + def test_pipeline_dry_run_skips_learning(self, conn): + """Dry run не должен вызывать _run_learning_extraction.""" + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps, dry_run=True) + + assert result["dry_run"] is True + # No decisions saved (dry run — no DB activity) + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") + def test_empty_learner_output_returns_no_decisions(self, mock_run, conn): + """Пустой stdout от learner (subprocess вернул "") — не бросает исключение, возвращает error.""" + # Используем пустую строку как stdout (не dict), чтобы raw_output оказался пустым + mock_run.return_value = _mock_claude_success("") + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") 
+ def test_empty_decisions_list_returns_zero_counts(self, mock_run, conn): + """Learner возвращает {"decisions": []} — added=0, skipped=0, без ошибки.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert result["skipped"] == 0 + assert "error" not in result + + @patch("agents.runner.subprocess.run") + def test_decision_missing_title_is_skipped(self, mock_run, conn): + """Decision без title молча пропускается, не вызывает исключение.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "description": "no title here", "tags": []}, + {"type": "convention", "title": "Valid decision", "description": "desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 1 + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 1 + + @patch("agents.runner.subprocess.run") + def test_decisions_field_not_list_returns_error(self, mock_run, conn): + """Если поле decisions не является списком — возвращается error dict.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": "not a list"})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + + @patch("agents.runner.subprocess.run") + def test_logs_agent_run_to_db(self, mock_run, conn): + """KIN-060: _run_learning_extraction должна писать запись в agent_logs.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Log test", "description": "desc", 
"tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + logs = conn.execute( + "SELECT * FROM agent_logs WHERE agent_role='learner' AND project_id='vdol'" + ).fetchall() + assert len(logs) == 1 + log = logs[0] + assert log["task_id"] == "VDOL-001" + assert log["action"] == "learn" + assert log["model"] == "sonnet" + + @patch("agents.runner.subprocess.run") + def test_learner_cost_included_in_cost_summary(self, mock_run, conn): + """KIN-060: get_cost_summary() включает затраты learner-агента.""" + learner_output = json.dumps({"decisions": []}) + mock_run.return_value = _mock_claude_success({ + "result": learner_output, + "cost_usd": 0.042, + "usage": {"total_tokens": 3000}, + }) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + costs = models.get_cost_summary(conn, days=1) + assert len(costs) == 1 + assert costs[0]["project_id"] == "vdol" + assert costs[0]["total_cost_usd"] == pytest.approx(0.042) + assert costs[0]["total_tokens"] == 3000 + + # ----------------------------------------------------------------------- + # KIN-061: Regression — валидация поля type в decision + # ----------------------------------------------------------------------- + + @patch("agents.runner.subprocess.run") + def test_valid_type_gotcha_is_saved_as_is(self, mock_run, conn): + """KIN-061: валидный тип 'gotcha' сохраняется без изменений.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Use WAL mode", "description": "Concurrent reads need WAL", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = 
conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "gotcha" + + @patch("agents.runner.subprocess.run") + def test_invalid_type_falls_back_to_decision(self, mock_run, conn): + """KIN-061: невалидный тип 'unknown_type' заменяется на 'decision'.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "unknown_type", "title": "Some title", "description": "Some desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "decision" + + @patch("agents.runner.subprocess.run") + def test_missing_type_falls_back_to_decision(self, mock_run, conn): + """KIN-061: отсутствующий ключ 'type' в decision заменяется на 'decision'.""" + learner_output = json.dumps({ + "decisions": [ + {"title": "No type key here", "description": "desc without type", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "decision" + + + # ----------------------------------------------------------------------- + # KIN-062: KIN_LEARNER_TIMEOUT — отдельный таймаут для learner-агента + # ----------------------------------------------------------------------- + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": ""}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_uses_120s_default_timeout(self, mock_run, conn): + """KIN-062: по умолчанию learner использует таймаут 120s (KIN_LEARNER_TIMEOUT не задан).""" + mock_run.return_value = _mock_claude_success({"result": 
json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 120 + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "300"}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_uses_custom_timeout_from_env(self, mock_run, conn): + """KIN-062: KIN_LEARNER_TIMEOUT переопределяет дефолтный таймаут learner-агента.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 300 + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "60", "KIN_AGENT_TIMEOUT": "900"}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_timeout_independent_of_agent_timeout(self, mock_run, conn): + """KIN-062: KIN_LEARNER_TIMEOUT не зависит от KIN_AGENT_TIMEOUT.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 60 + + +# --------------------------------------------------------------------------- +# KIN-056: Regression — web path timeout parity with CLI +# --------------------------------------------------------------------------- + +class TestRegressionKIN056: + """Регрессионные тесты KIN-056: агенты таймаутили через 300s из web, но не из CLI. + + Причина: noninteractive режим использовал timeout=300s. + Web API всегда устанавливает KIN_NONINTERACTIVE=1, поэтому таймаут был 300s. + Фикс: единый timeout=600s независимо от noninteractive (переопределяется KIN_AGENT_TIMEOUT). 
+ + Каждый тест ПАДАЛ бы со старым кодом (timeout=300 для noninteractive) + и ПРОХОДИТ после фикса. + """ + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_env_does_not_use_300s(self, mock_run, conn): + """Web путь устанавливает KIN_NONINTERACTIVE=1. До фикса это давало timeout=300s.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") != 300, ( + "Регрессия KIN-056: timeout не должен быть 300s в noninteractive режиме" + ) + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_timeout_is_600(self, mock_run, conn): + """Web путь: KIN_NONINTERACTIVE=1 → timeout = 600s (не 300s).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 600 + + @patch("agents.runner.subprocess.run") + def test_web_and_cli_paths_use_same_timeout(self, mock_run, conn): + """Таймаут через web-путь (KIN_NONINTERACTIVE=1) == таймаут CLI (noninteractive=True).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + + # Web path: env var KIN_NONINTERACTIVE=1, noninteractive param not set + with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}): + run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) + web_timeout = mock_run.call_args[1].get("timeout") + + mock_run.reset_mock() + + # CLI path: noninteractive=True, no env var + with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "", "KIN_AGENT_TIMEOUT": ""}): + run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True) + cli_timeout = mock_run.call_args[1].get("timeout") + + assert web_timeout == 
cli_timeout, ( + f"Таймаут web ({web_timeout}s) != CLI ({cli_timeout}s) — регрессия KIN-056" + ) + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": "900"}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_respects_kin_agent_timeout_override(self, mock_run, conn): + """Web путь: KIN_AGENT_TIMEOUT переопределяет дефолтный таймаут даже при KIN_NONINTERACTIVE=1.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 900 + + +# --------------------------------------------------------------------------- +# KIN-057: claude CLI в PATH при запуске через launchctl +# --------------------------------------------------------------------------- + +class TestClaudePath: + """Регрессионные тесты KIN-057: launchctl-демоны могут не видеть claude в PATH.""" + + def test_build_claude_env_contains_extra_paths(self): + """_build_claude_env должен добавить /opt/homebrew/bin и /usr/local/bin в PATH.""" + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + for extra_dir in _EXTRA_PATH_DIRS: + assert extra_dir in path_dirs, ( + f"Регрессия KIN-057: {extra_dir} не найден в PATH, сгенерированном _build_claude_env" + ) + + def test_build_claude_env_no_duplicate_paths(self): + """_build_claude_env не должен дублировать уже существующие пути. + + Мокируем PATH на фиксированное значение, чтобы тест не зависел от + реального окружения (решение #48). 
+ """ + fixed_path = "/usr/bin:/bin" + with patch.dict("os.environ", {"PATH": fixed_path}, clear=False): + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + seen = set() + for d in path_dirs: + assert d not in seen, f"Дублирующийся PATH entry: {d}" + seen.add(d) + + def test_build_claude_env_preserves_existing_path(self): + """_build_claude_env должен сохранять уже существующие пути.""" + with patch.dict("os.environ", {"PATH": "/custom/bin:/usr/bin:/bin"}): + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + assert "/custom/bin" in path_dirs + assert "/usr/bin" in path_dirs + + def test_resolve_claude_cmd_returns_string(self): + """_resolve_claude_cmd должен всегда возвращать строку.""" + cmd = _resolve_claude_cmd() + assert isinstance(cmd, str) + assert len(cmd) > 0 + + def test_resolve_claude_cmd_fallback_when_not_found(self): + """_resolve_claude_cmd должен вернуть 'claude' если CLI не найден в PATH.""" + with patch("agents.runner.shutil.which", return_value=None): + cmd = _resolve_claude_cmd() + assert cmd == "claude" + + def test_resolve_claude_cmd_returns_full_path_when_found(self): + """_resolve_claude_cmd должен вернуть полный путь если claude найден.""" + with patch("agents.runner.shutil.which", return_value="/opt/homebrew/bin/claude"): + cmd = _resolve_claude_cmd() + assert cmd == "/opt/homebrew/bin/claude" + + @patch("agents.runner.subprocess.run") + def test_run_claude_passes_env_to_subprocess(self, mock_run, conn): + """_run_claude должен передавать env= в subprocess.run (а не наследовать голый PATH).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert "env" in call_kwargs, ( + "Регрессия KIN-057: subprocess.run должен получать явный env с расширенным PATH" + ) + assert call_kwargs["env"] is not None + + @patch("agents.runner.subprocess.run") + def test_run_claude_env_has_homebrew_in_path(self, mock_run, conn): 
+ """env переданный в subprocess.run должен содержать /opt/homebrew/bin в PATH.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + env = call_kwargs.get("env", {}) + assert "/opt/homebrew/bin" in env.get("PATH", ""), ( + "Регрессия KIN-057: /opt/homebrew/bin не найден в env['PATH'] subprocess.run" + ) + + @patch("agents.runner.subprocess.run") + def test_file_not_found_returns_127(self, mock_run, conn): + """Если claude не найден (FileNotFoundError), должен вернуться returncode 127.""" + mock_run.side_effect = FileNotFoundError("claude not found") + result = run_agent(conn, "debugger", "VDOL-001", "vdol") + assert result["success"] is False + assert "not found" in (result.get("error") or "").lower() + + @patch.dict("os.environ", {"PATH": ""}) + def test_launchctl_empty_path_build_env_adds_extra_dirs(self): + """Регрессия KIN-057: когда launchctl запускает с пустым PATH, + _build_claude_env должен добавить _EXTRA_PATH_DIRS чтобы claude был доступен. + Без фикса: os.environ["PATH"]="" → shutil.which("claude") → None → FileNotFoundError. + После фикса: _build_claude_env строит PATH с /opt/homebrew/bin и др. + """ + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + # Явная проверка каждой критичной директории + for extra_dir in _EXTRA_PATH_DIRS: + assert extra_dir in path_dirs, ( + f"KIN-057: при пустом os PATH директория {extra_dir} должна быть добавлена" + ) + + @patch.dict("os.environ", {"PATH": ""}) + def test_launchctl_empty_path_shutil_which_fails_without_fix(self): + """Воспроизводит сломанное поведение: при PATH='' shutil.which возвращает None. + Это точно то, что происходило до фикса — launchctl не видел claude. + Тест документирует, ПОЧЕМУ нужен _build_claude_env вместо прямого os.environ. 
+ """ + import shutil + # Без фикса: поиск с пустым PATH не найдёт claude + result_without_fix = shutil.which("claude", path="") + assert result_without_fix is None, ( + "Если этот assert упал — shutil.which нашёл claude в пустом PATH, " + "что невозможно. Ожидаем None — именно поэтому нужен _build_claude_env." + ) + # С фиксом: _resolve_claude_cmd строит расширенный PATH и находит claude + # (или возвращает fallback "claude", но не бросает FileNotFoundError) + cmd = _resolve_claude_cmd() + assert isinstance(cmd, str) and len(cmd) > 0, ( + "KIN-057: _resolve_claude_cmd должен возвращать строку даже при пустом os PATH" + ) + + +# --------------------------------------------------------------------------- +# KIN-063: TestCompletionMode — auto_complete + last-step role check +# --------------------------------------------------------------------------- + +class TestCompletionMode: + """auto_complete mode срабатывает только если последний шаг — tester или reviewer.""" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_with_tester_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn): + """auto_complete + последний шаг tester → status=done (Decision #29).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_with_reviewer_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn): + 
"""auto_complete + последний шаг reviewer → status=done.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "reviewer", "brief": "review"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_without_tester_last_sets_review(self, mock_run, mock_hooks, mock_followup, conn): + """auto_complete + последний шаг НЕ tester/reviewer → status=review (Decision #29).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "debugger", "brief": "debug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "Регрессия KIN-063: auto_complete без tester/reviewer последним НЕ должен авто-завершать" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_legacy_auto_mode_value_not_recognized(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия: старое значение 'auto' больше не является валидным режимом. + + После KIN-063 'auto' → 'auto_complete'. Если в DB осталось 'auto' (без миграции), + runner НЕ должен авто-завершать — это 'review'-ветка (безопасный fallback). 
+ (Decision #29) + """ + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Прямой SQL-апдейт, обходя validate_completion_mode, чтобы симулировать + # старую запись в БД без миграции + conn.execute("UPDATE projects SET execution_mode='auto' WHERE id='vdol'") + conn.commit() + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "Регрессия: 'auto' (старый формат) не должен срабатывать как auto_complete" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_with_tester_last_keeps_task_in_review(self, mock_run, mock_hooks, mock_followup, conn): + """review mode + последний шаг tester → task.status == 'review', НЕ done (ждёт ручного approve).""" + mock_run.return_value = _mock_claude_success({"result": "all tests pass"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект и задача остаются в дефолтном 'review' mode + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + assert task["status"] != "done", ( + "KIN-063: review mode не должен авто-завершать задачу даже если tester последний" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_project_review_overrides_no_task_completion_mode(self, mock_run, mock_hooks, mock_followup, conn): + """Project execution_mode='review' + задача без override → pipeline завершается в 
'review'. + + Сценарий: PM выбрал auto_complete, но проект настроен на 'review' (ручной override человека). + Задача не имеет task-level execution_mode, поэтому get_effective_mode возвращает project-level 'review'. + """ + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект явно в 'review', задача без execution_mode + models.update_project(conn, "vdol", execution_mode="review") + # task VDOL-001 создана без execution_mode (None) — fixture + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + assert result["mode"] == "review" + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "KIN-063: project-level 'review' должен применяться когда задача не имеет override" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_not_broken_by_revise_comment(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия KIN-045: revise_comment в задаче не ломает auto_complete flow. + + Задача прошла ревизию (revise_comment != None, status=in_progress), + затем повторно запускается пайплайн в auto_complete режиме. + Последний шаг — tester → задача должна получить status='done'. 
+ """ + mock_run.return_value = _mock_claude_success({"result": "all tests pass"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + models.update_task( + conn, "VDOL-001", + status="in_progress", + revise_comment="Добавь тест для пустого массива", + ) + + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done", ( + "KIN-045: revise_comment не должен мешать auto_complete авто-завершению" + ) + + +# --------------------------------------------------------------------------- +# KIN-048: _run_autocommit — флаг, git path, env= +# --------------------------------------------------------------------------- + +class TestAutocommit: + """KIN-048: _run_autocommit — autocommit_enabled флаг, shutil.which, env= regression.""" + + def test_disabled_project_skips_subprocess(self, conn): + """autocommit_enabled=0 (дефолт) → subprocess не вызывается.""" + with patch("agents.runner.subprocess.run") as mock_run: + _run_autocommit(conn, "VDOL-001", "vdol") + mock_run.assert_not_called() + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_enabled_calls_git_add_and_commit(self, mock_which, mock_run, conn, tmp_path): + """autocommit_enabled=1 → вызываются git add -A и git commit с task_id и title.""" + mock_which.return_value = "/usr/bin/git" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + assert mock_run.call_count == 2 + add_cmd = mock_run.call_args_list[0][0][0] + assert add_cmd == ["/usr/bin/git", "add", "-A"] + commit_cmd = mock_run.call_args_list[1][0][0] + assert commit_cmd[0] == "/usr/bin/git" + 
assert commit_cmd[1] == "commit" + assert "VDOL-001" in commit_cmd[-1] + assert "Fix bug" in commit_cmd[-1] + + @patch("agents.runner.subprocess.run") + def test_nothing_to_commit_no_exception(self, mock_run, conn, tmp_path): + """returncode=1 (nothing to commit) → исключение не бросается.""" + mock_run.return_value = MagicMock(returncode=1) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") # must not raise + + @patch("agents.runner.subprocess.run") + def test_passes_env_to_subprocess(self, mock_run, conn, tmp_path): + """Regression #33: env= должен передаваться в subprocess.run.""" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + for call in mock_run.call_args_list: + kwargs = call[1] + assert "env" in kwargs, "Regression #33: subprocess.run должен получать env=" + assert "/opt/homebrew/bin" in kwargs["env"].get("PATH", "") + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_resolves_git_via_shutil_which(self, mock_which, mock_run, conn, tmp_path): + """Regression #32: git резолвится через shutil.which, а не hardcoded 'git'.""" + mock_which.return_value = "/opt/homebrew/bin/git" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + git_which_calls = [c for c in mock_which.call_args_list if c[0][0] == "git"] + assert len(git_which_calls) > 0, "Regression #32: shutil.which должен вызываться для git" + first_cmd = mock_run.call_args_list[0][0][0] + assert first_cmd[0] == "/opt/homebrew/bin/git" + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_git_not_found_no_crash_logs_warning(self, mock_which, mock_run, conn, tmp_path): + """shutil.which(git) → None → 
fallback 'git' → FileNotFoundError → no crash, WARNING logged.""" + mock_which.return_value = None # git не найден в PATH + mock_run.side_effect = FileNotFoundError("git: command not found") + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + with patch("agents.runner._logger") as mock_logger: + _run_autocommit(conn, "VDOL-001", "vdol") # не должен бросать исключение + + mock_logger.warning.assert_called_once() + + @patch("agents.runner._run_autocommit") + @patch("agents.runner.subprocess.run") + def test_autocommit_not_called_on_failed_pipeline(self, mock_run, mock_autocommit, conn): + """Pipeline failure → _run_autocommit must NOT be called (gotcha #41).""" + mock_run.return_value = _mock_claude_failure("compilation error") + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + mock_autocommit.assert_not_called() + + +# --------------------------------------------------------------------------- +# KIN-055: execution_mode='review' при переводе задачи в статус review +# --------------------------------------------------------------------------- + +class TestReviewModeExecutionMode: + """Регрессия KIN-055: execution_mode должен быть 'review', а не NULL после pipeline в review mode.""" + + def test_task_execution_mode_is_null_before_pipeline(self, conn): + """Граничный случай: execution_mode IS NULL до запуска pipeline (задача только создана).""" + task = models.get_task(conn, "VDOL-001") + assert task["execution_mode"] is None, ( + "Задача должна иметь NULL execution_mode до выполнения pipeline" + ) + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_sets_execution_mode_review(self, mock_run, mock_hooks, conn): + """После pipeline в review mode task.execution_mode должно быть 'review', а не NULL.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + + 
steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + # Регрессионная проверка KIN-055: execution_mode не должен быть NULL + assert task["execution_mode"] is not None, ( + "Регрессия KIN-055: execution_mode не должен быть NULL после перевода задачи в статус review" + ) + assert task["execution_mode"] == "review" + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_execution_mode_persisted_in_db(self, mock_run, mock_hooks, conn): + """execution_mode='review' должно сохраняться в SQLite напрямую, минуя ORM-слой.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + row = conn.execute( + "SELECT execution_mode FROM tasks WHERE id='VDOL-001'" + ).fetchone() + assert row is not None + assert row["execution_mode"] == "review", ( + "Регрессия KIN-055: execution_mode должен быть 'review' в SQLite после pipeline" + ) + + +# --------------------------------------------------------------------------- +# KIN-021: Audit log for --dangerously-skip-permissions +# --------------------------------------------------------------------------- + +class TestAuditLogDangerousSkip: + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_audit_log_written_on_permission_retry( + self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn + ): + """При retry с --dangerously-skip-permissions записывается событие в audit_log.""" + permission_fail = _mock_claude_failure("permission denied: cannot write file") + retry_success = _mock_claude_success({"result": 
"fixed"}) + + mock_run.side_effect = [permission_fail, retry_success] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + + # Проверяем audit_log через прямой SQL + rows = conn.execute( + "SELECT * FROM audit_log WHERE task_id='VDOL-001'" + ).fetchall() + assert len(rows) == 1 + assert rows[0]["event_type"] == "dangerous_skip" + assert rows[0]["step_id"] == "debugger" + assert "debugger" in rows[0]["reason"] + + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_dangerously_skipped_flag_set_on_task( + self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn + ): + """tasks.dangerously_skipped=1 после retry с --dangerously-skip-permissions.""" + permission_fail = _mock_claude_failure("permission denied: cannot write file") + retry_success = _mock_claude_success({"result": "fixed"}) + + mock_run.side_effect = [permission_fail, retry_success] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + # Верификация через прямой SQL (минуя ORM) + row = conn.execute( + "SELECT dangerously_skipped FROM tasks WHERE id='VDOL-001'" + ).fetchone() + assert row is not None + assert row["dangerously_skipped"] == 1 + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def 
test_no_audit_log_in_review_mode(self, mock_run, mock_hooks, conn): + """В review mode retry не происходит, audit_log остаётся пустым.""" + permission_fail = _mock_claude_failure("permission denied: cannot write file") + mock_run.return_value = permission_fail + mock_hooks.return_value = [] + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + rows = conn.execute( + "SELECT * FROM audit_log WHERE task_id='VDOL-001'" + ).fetchall() + assert len(rows) == 0 + + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_audit_log_no_entry_on_normal_success( + self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn + ): + """При успешном выполнении без retry audit_log не записывается.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + rows = conn.execute( + "SELECT * FROM audit_log WHERE task_id='VDOL-001'" + ).fetchall() + assert len(rows) == 0 + + +# --------------------------------------------------------------------------- +# KIN-016: Blocked Protocol +# --------------------------------------------------------------------------- + +class TestParseAgentBlocked: + def test_returns_none_on_failure(self): + result = {"success": False, "output": {"status": "blocked", "reason": "no access"}} + assert _parse_agent_blocked(result) is None + + def test_returns_none_when_output_not_dict(self): + result = {"success": True, "output": 
"plain text output"} + assert _parse_agent_blocked(result) is None + + def test_returns_none_when_status_not_blocked(self): + result = {"success": True, "output": {"status": "done", "changes": []}} + assert _parse_agent_blocked(result) is None + + def test_detects_status_blocked(self): + result = {"success": True, "output": {"status": "blocked", "reason": "no file access"}} + blocked = _parse_agent_blocked(result) + assert blocked is not None + assert blocked["reason"] == "no file access" + assert blocked["blocked_at"] is not None + + def test_detects_verdict_blocked(self): + """reviewer.md uses verdict: blocked instead of status: blocked.""" + result = {"success": True, "output": {"verdict": "blocked", "blocked_reason": "unreadable"}} + blocked = _parse_agent_blocked(result) + assert blocked is not None + assert blocked["reason"] == "unreadable" + + def test_uses_provided_blocked_at(self): + result = {"success": True, "output": { + "status": "blocked", "reason": "out of scope", + "blocked_at": "2026-03-16T10:00:00", + }} + blocked = _parse_agent_blocked(result) + assert blocked["blocked_at"] == "2026-03-16T10:00:00" + + def test_falls_back_blocked_at_if_missing(self): + result = {"success": True, "output": {"status": "blocked", "reason": "x"}} + blocked = _parse_agent_blocked(result) + assert "T" in blocked["blocked_at"] # ISO-8601 with T separator + + def test_does_not_check_nested_status(self): + """Nested status='blocked' in sub-fields must NOT trigger blocked protocol.""" + result = {"success": True, "output": { + "status": "done", + "changes": [{"file": "a.py", "status": "blocked"}], # nested — must be ignored + }} + assert _parse_agent_blocked(result) is None + + +class TestPipelineBlockedProtocol: + @patch("agents.runner._run_autocommit") + @patch("agents.runner.subprocess.run") + def test_pipeline_stops_on_semantic_blocked(self, mock_run, mock_autocommit, conn): + """KIN-016: когда агент возвращает status='blocked', пайплайн останавливается.""" + # First 
step returns semantic blocked + mock_run.return_value = _mock_claude_success({ + "result": json.dumps({"status": "blocked", "reason": "cannot access external API"}), + }) + + steps = [ + {"role": "debugger", "brief": "find bug"}, + {"role": "tester", "brief": "verify"}, + ] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert result["steps_completed"] == 0 + assert "blocked" in result["error"] + assert result["blocked_by"] == "debugger" + assert result["blocked_reason"] == "cannot access external API" + + # Task marked as blocked with enriched fields + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "blocked" + assert task["blocked_reason"] == "cannot access external API" + assert task["blocked_agent_role"] == "debugger" + assert task["blocked_pipeline_step"] == "1" + assert task["blocked_at"] is not None + + # Pipeline marked as failed + pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone() + assert pipe["status"] == "failed" + + @patch("agents.runner._run_autocommit") + @patch("agents.runner.subprocess.run") + def test_pipeline_blocks_on_second_step(self, mock_run, mock_autocommit, conn): + """KIN-016: blocked на шаге 2 → steps_completed=1, pipeline_step='2'.""" + mock_run.side_effect = [ + _mock_claude_success({"result": json.dumps({"status": "done", "changes": []})}), + _mock_claude_success({"result": json.dumps({ + "status": "blocked", "reason": "test environment unavailable", + })}), + ] + + steps = [ + {"role": "backend_dev", "brief": "implement"}, + {"role": "tester", "brief": "test"}, + ] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert result["steps_completed"] == 1 + assert result["blocked_by"] == "tester" + + task = models.get_task(conn, "VDOL-001") + assert task["blocked_agent_role"] == "tester" + assert task["blocked_pipeline_step"] == "2" + + @patch("agents.runner._run_autocommit") + 
@patch("agents.runner.subprocess.run") + def test_reviewer_verdict_blocked_stops_pipeline(self, mock_run, mock_autocommit, conn): + """KIN-016: reviewer возвращает verdict='blocked' → пайплайн останавливается.""" + mock_run.return_value = _mock_claude_success({ + "result": json.dumps({ + "verdict": "blocked", "status": "blocked", + "reason": "cannot read implementation files", + }), + }) + + steps = [{"role": "reviewer", "brief": "review"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert result["blocked_by"] == "reviewer" + + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "blocked" + assert task["blocked_agent_role"] == "reviewer" + + +# --------------------------------------------------------------------------- +# KIN-071: _save_sysadmin_output +# --------------------------------------------------------------------------- + +class TestSaveSysadminOutput: + """KIN-071: _save_sysadmin_output парсит и сохраняет decisions + modules.""" + + @pytest.fixture + def ops_conn(self): + c = init_db(":memory:") + models.create_project( + c, "srv", "Server", "", + project_type="operations", + ssh_host="10.0.0.1", + ) + models.create_task(c, "SRV-001", "srv", "Scan server") + yield c + c.close() + + def test_saves_decisions_and_modules(self, ops_conn): + """KIN-071: sysadmin output корректно сохраняет decisions и modules.""" + from agents.runner import _save_sysadmin_output + output = { + "status": "done", + "decisions": [ + {"type": "gotcha", "title": "Port 8080 open", "description": "nginx on 8080", "tags": ["server"]}, + {"type": "decision", "title": "Docker used", "description": "docker 24.0", "tags": ["docker"]}, + ], + "modules": [ + {"name": "nginx", "type": "service", "path": "/etc/nginx", "description": "web proxy"}, + ], + } + result = _save_sysadmin_output( + ops_conn, "srv", "SRV-001", + {"raw_output": json.dumps(output)} + ) + assert result["decisions_added"] == 2 + assert result["modules_added"] 
== 1 + + decisions = models.get_decisions(ops_conn, "srv") + assert len(decisions) == 2 + modules = models.get_modules(ops_conn, "srv") + assert len(modules) == 1 + assert modules[0]["name"] == "nginx" + + def test_idempotent_on_duplicate_decisions(self, ops_conn): + """KIN-071: повторный вызов не создаёт дублей.""" + from agents.runner import _save_sysadmin_output + output = { + "decisions": [ + {"type": "gotcha", "title": "Port 8080 open", "description": "nginx on 8080"}, + ], + "modules": [], + } + r1 = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + r2 = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + assert r1["decisions_added"] == 1 + assert r2["decisions_added"] == 0 # deduped + assert r2["decisions_skipped"] == 1 + + def test_idempotent_on_duplicate_modules(self, ops_conn): + """KIN-071: повторный вызов не создаёт дублей модулей.""" + from agents.runner import _save_sysadmin_output + output = { + "decisions": [], + "modules": [{"name": "nginx", "type": "service", "path": "/etc/nginx"}], + } + r1 = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + r2 = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + assert r1["modules_added"] == 1 + assert r2["modules_skipped"] == 1 + assert len(models.get_modules(ops_conn, "srv")) == 1 + + def test_handles_non_json_output(self, ops_conn): + """KIN-071: не-JSON вывод не вызывает исключения.""" + from agents.runner import _save_sysadmin_output + result = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": "not json"}) + assert result["decisions_added"] == 0 + assert result["modules_added"] == 0 + + def test_handles_empty_output(self, ops_conn): + """KIN-071: пустой вывод не вызывает исключения.""" + from agents.runner import _save_sysadmin_output + result = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": ""}) + assert 
result["decisions_added"] == 0 + + def test_full_sysadmin_output_format_saves_docker_and_systemctl_as_decisions(self, ops_conn): + """KIN-071: полный формат вывода sysadmin (docker ps + systemctl) → decisions + modules.""" + from agents.runner import _save_sysadmin_output + # Симуляция реального вывода sysadmin-агента после docker ps и systemctl + output = { + "status": "done", + "summary": "Ubuntu 22.04, nginx + postgres + app in docker", + "os": "Ubuntu 22.04 LTS, kernel 5.15.0", + "services": [ + {"name": "nginx", "type": "systemd", "status": "running", "note": "web proxy"}, + {"name": "myapp", "type": "docker", "image": "myapp:1.2.3", "ports": ["80:8080"]}, + {"name": "postgres", "type": "docker", "image": "postgres:15", "ports": ["5432:5432"]}, + ], + "open_ports": [ + {"port": 80, "proto": "tcp", "process": "nginx"}, + {"port": 5432, "proto": "tcp", "process": "postgres"}, + ], + "decisions": [ + { + "type": "gotcha", + "title": "nginx proxies to docker app on 8080", + "description": "nginx.conf proxy_pass http://localhost:8080", + "tags": ["nginx", "docker"], + }, + { + "type": "decision", + "title": "postgres data on /var/lib/postgresql", + "description": "Volume mount /var/lib/postgresql/data persists DB", + "tags": ["postgres", "storage"], + }, + ], + "modules": [ + { + "name": "nginx", + "type": "service", + "path": "/etc/nginx", + "description": "Reverse proxy", + "owner_role": "sysadmin", + }, + { + "name": "myapp", + "type": "docker", + "path": "/opt/myapp", + "description": "Main application container", + }, + { + "name": "postgres", + "type": "docker", + "path": "/var/lib/postgresql", + "description": "Database", + }, + ], + } + result = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + + assert result["decisions_added"] == 2 + assert result["modules_added"] == 3 + + decisions = models.get_decisions(ops_conn, "srv") + d_titles = {d["title"] for d in decisions} + assert "nginx proxies to docker app on 8080" in 
d_titles + assert "postgres data on /var/lib/postgresql" in d_titles + + modules = models.get_modules(ops_conn, "srv") + m_names = {m["name"] for m in modules} + assert {"nginx", "myapp", "postgres"} == m_names + + def test_invalid_decision_type_normalized_to_decision(self, ops_conn): + """KIN-071: тип 'workaround' не входит в VALID_DECISION_TYPES → нормализуется в 'decision'.""" + from agents.runner import _save_sysadmin_output + output = { + "decisions": [ + { + "type": "workaround", + "title": "Use /proc/net for port list", + "description": "ss not installed, fallback to /proc/net/tcp", + "tags": ["networking"], + }, + ], + "modules": [], + } + _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + decisions = models.get_decisions(ops_conn, "srv") + assert len(decisions) == 1 + assert decisions[0]["type"] == "decision" + + def test_decision_missing_title_skipped(self, ops_conn): + """KIN-071: decision без title пропускается.""" + from agents.runner import _save_sysadmin_output + output = { + "decisions": [ + {"type": "gotcha", "title": "", "description": "Something"}, + ], + "modules": [], + } + result = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + assert result["decisions_added"] == 0 + + def test_module_missing_name_skipped(self, ops_conn): + """KIN-071: module без name пропускается.""" + from agents.runner import _save_sysadmin_output + output = { + "decisions": [], + "modules": [ + {"name": "", "type": "service", "path": "/etc/something"}, + ], + } + result = _save_sysadmin_output(ops_conn, "srv", "SRV-001", {"raw_output": json.dumps(output)}) + assert result["modules_added"] == 0 + +# --------------------------------------------------------------------------- +# KIN-003: _get_changed_files — вычисление изменённых git-файлов +# --------------------------------------------------------------------------- + +class TestGetChangedFiles: + """Тесты для _get_changed_files(project_path) из 
agents/runner.py (KIN-003).""" + + @patch("agents.runner.subprocess.run") + def test_returns_files_from_git_diff(self, mock_run): + """Возвращает список файлов из git diff --name-only.""" + proc = MagicMock() + proc.returncode = 0 + proc.stdout = "web/frontend/App.vue\ncore/models.py\n" + mock_run.return_value = proc + + result = _get_changed_files("/tmp/fake-project") + + assert isinstance(result, list) + assert "web/frontend/App.vue" in result + assert "core/models.py" in result + + @patch("agents.runner.subprocess.run") + def test_returns_empty_list_on_exception(self, mock_run): + """При ошибке git (не найден, не репозиторий) возвращает [].""" + mock_run.side_effect = Exception("git not found") + + result = _get_changed_files("/tmp/fake-project") + + assert result == [] + + @patch("agents.runner.subprocess.run") + def test_deduplicates_files_from_multiple_git_commands(self, mock_run): + """Один файл из нескольких git-команд появляется в результате только один раз.""" + proc = MagicMock() + proc.returncode = 0 + proc.stdout = "web/frontend/App.vue\n" + mock_run.return_value = proc # все 3 git-команды возвращают одно и то же + + result = _get_changed_files("/tmp/fake-project") + + assert result.count("web/frontend/App.vue") == 1, ( + "Дубликаты из разных git-команд должны дедуплицироваться" + ) + + @patch("agents.runner.subprocess.run") + def test_combines_files_from_different_git_commands(self, mock_run): + """Файлы из трёх разных git-команд объединяются в один список.""" + mock_run.side_effect = [ + MagicMock(returncode=0, stdout="web/frontend/App.vue\n"), + MagicMock(returncode=0, stdout="core/models.py\n"), + MagicMock(returncode=0, stdout="agents/runner.py\n"), + ] + + result = _get_changed_files("/tmp/fake-project") + + assert "web/frontend/App.vue" in result + assert "core/models.py" in result + assert "agents/runner.py" in result + + @patch("agents.runner.subprocess.run") + def test_skips_failed_git_command_and_continues(self, mock_run): + """Упавшая 
git-команда (returncode != 0) не блокирует остальные.""" + fail_proc = MagicMock(returncode=1, stdout="") + success_proc = MagicMock(returncode=0, stdout="core/models.py\n") + mock_run.side_effect = [fail_proc, success_proc, fail_proc] + + result = _get_changed_files("/tmp/fake-project") + + assert "core/models.py" in result + + @patch("agents.runner.subprocess.run") + def test_strips_whitespace_from_file_paths(self, mock_run): + """Пробелы и переносы вокруг имён файлов обрезаются.""" + proc = MagicMock() + proc.returncode = 0 + proc.stdout = " web/frontend/App.vue \n core/models.py \n" + mock_run.return_value = proc + + result = _get_changed_files("/tmp/fake-project") + + assert "web/frontend/App.vue" in result + assert "core/models.py" in result + assert " web/frontend/App.vue " not in result + + +# --------------------------------------------------------------------------- +# KIN-003: run_pipeline — передача changed_files в run_hooks +# --------------------------------------------------------------------------- + +class TestPipelineChangedFiles: + """Интеграционные тесты: pipeline вычисляет changed_files и передаёт в run_hooks.""" + + @patch("agents.runner._get_changed_files") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_pipeline_passes_changed_files_to_run_hooks( + self, mock_run, mock_hooks, mock_get_files + ): + """run_pipeline передаёт changed_files в run_hooks(event='pipeline_completed'). + + Используем проект с path='/tmp' (реальная директория), чтобы + _get_changed_files был вызван. 
+ """ + c = init_db(":memory:") + models.create_project(c, "kin-tmp", "KinTmp", "/tmp", tech_stack=["vue3"]) + models.create_task(c, "KT-001", "kin-tmp", "Fix bug") + + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_get_files.return_value = ["web/frontend/App.vue", "core/models.py"] + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(c, "KT-001", steps) + c.close() + + assert result["success"] is True + mock_get_files.assert_called_once_with("/tmp") + + # pipeline_completed call должен содержать changed_files + pipeline_calls = [ + call for call in mock_hooks.call_args_list + if call.kwargs.get("event") == "pipeline_completed" + ] + assert len(pipeline_calls) >= 1 + kw = pipeline_calls[0].kwargs + assert kw.get("changed_files") == ["web/frontend/App.vue", "core/models.py"] + + @patch("agents.runner._run_autocommit") + @patch("core.hooks.subprocess.run") + @patch("agents.runner._run_claude") + def test_pipeline_completes_when_frontend_hook_build_fails( + self, mock_run_claude, mock_hook_run, mock_autocommit + ): + """Ошибка сборки фронтенда (exitcode=1) не роняет pipeline (AC #3 KIN-003). + + Хук выполняется и возвращает failure, но pipeline.status = 'completed' + и результат run_pipeline['success'] = True. + + Примечание: патчим _run_claude (не subprocess.run) чтобы не конфликтовать + с core.hooks.subprocess.run — оба ссылаются на один и тот же subprocess.run. 
+ """ + from core.hooks import create_hook + + c = init_db(":memory:") + models.create_project(c, "kin-build", "KinBuild", "/tmp", tech_stack=["vue3"]) + models.create_task(c, "KB-001", "kin-build", "Add feature") + create_hook( + c, "kin-build", "rebuild-frontend", "pipeline_completed", + "/tmp/rebuild.sh", + trigger_module_path=None, + working_dir="/tmp", + ) + + mock_run_claude.return_value = { + "output": "done", "returncode": 0, "error": None, + "empty_output": False, "tokens_used": None, "cost_usd": None, + } + # npm run build завершается с ошибкой + fail_proc = MagicMock() + fail_proc.returncode = 1 + fail_proc.stdout = "" + fail_proc.stderr = "Error: Cannot find module './App'" + mock_hook_run.return_value = fail_proc + + steps = [{"role": "tester", "brief": "test feature"}] + result = run_pipeline(c, "KB-001", steps) + + assert result["success"] is True, ( + "Ошибка сборки хука не должна ронять pipeline" + ) + pipe = c.execute( + "SELECT status FROM pipelines WHERE task_id='KB-001'" + ).fetchone() + assert pipe["status"] == "completed" + c.close() + + @patch("agents.runner._run_autocommit") + @patch("agents.runner.subprocess.run") + def test_pipeline_changed_files_is_none_when_project_path_missing( + self, mock_run, mock_autocommit, conn + ): + """Если путь проекта не существует, changed_files=None передаётся в run_hooks. + + Хуки по-прежнему запускаются, но без git-фильтра (task_modules fallback). 
+ """ + # vdol path = ~/projects/vdolipoperek (не существует в CI) + # Хук без trigger_module_path должен сработать + from core.hooks import create_hook, get_hook_logs + + create_hook(conn, "vdol", "always", "pipeline_completed", + "echo ok", trigger_module_path=None, working_dir="/tmp") + + mock_run.return_value = _mock_claude_success({"result": "done"}) + build_proc = MagicMock(returncode=0, stdout="ok", stderr="") + + with patch("core.hooks.subprocess.run", return_value=build_proc): + steps = [{"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + # Хук без фильтра должен был выполниться + logs = get_hook_logs(conn, project_id="vdol") + assert len(logs) >= 1 + + +# --------------------------------------------------------------------------- +# _save_sysadmin_output — KIN-081 +# --------------------------------------------------------------------------- + +class TestSaveSysadminOutput: + def test_modules_added_count_for_new_modules(self, conn): + """KIN-081: _save_sysadmin_output считает modules_added правильно через _created.""" + result = { + "raw_output": json.dumps({ + "modules": [ + {"name": "nginx", "type": "infra", "path": "/etc/nginx", + "description": "Web server"}, + {"name": "postgres", "type": "infra", "path": "/var/lib/postgresql", + "description": "Database"}, + ], + "decisions": [], + }) + } + counts = _save_sysadmin_output(conn, "vdol", "VDOL-001", result) + assert counts["modules_added"] == 2 + assert counts["modules_skipped"] == 0 + + def test_modules_skipped_count_for_duplicate_names(self, conn): + """KIN-081: повторный вызов с теми же модулями: added=0, skipped=2.""" + raw = json.dumps({ + "modules": [ + {"name": "nginx", "type": "infra", "path": "/etc/nginx"}, + {"name": "postgres", "type": "infra", "path": "/var/lib/postgresql"}, + ], + "decisions": [], + }) + result = {"raw_output": raw} + # First call — adds + _save_sysadmin_output(conn, "vdol", "VDOL-001", result) + # 
Second call — all duplicates + counts = _save_sysadmin_output(conn, "vdol", "VDOL-001", result) + assert counts["modules_added"] == 0 + assert counts["modules_skipped"] == 2 + + def test_empty_output_returns_zeros(self, conn): + """_save_sysadmin_output с не-JSON строкой возвращает нули.""" + counts = _save_sysadmin_output(conn, "vdol", "VDOL-001", + {"raw_output": "Agent completed the task."}) + assert counts == { + "decisions_added": 0, "decisions_skipped": 0, + "modules_added": 0, "modules_skipped": 0, + } + + def test_decisions_added_and_skipped(self, conn): + """_save_sysadmin_output дедуплицирует decisions через add_decision_if_new.""" + raw = json.dumps({ + "modules": [], + "decisions": [ + {"type": "convention", "title": "Use WAL mode", + "description": "PRAGMA journal_mode=WAL for SQLite"}, + ], + }) + result = {"raw_output": raw} + counts1 = _save_sysadmin_output(conn, "vdol", "VDOL-001", result) + assert counts1["decisions_added"] == 1 + assert counts1["decisions_skipped"] == 0 + + counts2 = _save_sysadmin_output(conn, "vdol", "VDOL-001", result) + assert counts2["decisions_added"] == 0 + assert counts2["decisions_skipped"] == 1 + + +# --------------------------------------------------------------------------- +# check_claude_auth +# --------------------------------------------------------------------------- + +class TestCheckClaudeAuth: + """Tests for check_claude_auth() — Claude CLI login healthcheck.""" + + @patch("agents.runner.subprocess.run") + def test_ok_when_returncode_zero(self, mock_run): + """Не бросает исключение при returncode=0 и корректном JSON.""" + mock = MagicMock() + mock.stdout = json.dumps({"result": "ok"}) + mock.stderr = "" + mock.returncode = 0 + mock_run.return_value = mock + + check_claude_auth() # должна вернуть None без исключений + + @patch("agents.runner.subprocess.run") + def test_not_logged_in_via_string_in_stdout(self, mock_run): + """Бросает ClaudeAuthError при 'Not logged in' в stdout.""" + mock = MagicMock() + 
mock.stdout = "Not logged in" + mock.stderr = "" + mock.returncode = 1 + mock_run.return_value = mock + + with pytest.raises(ClaudeAuthError) as exc_info: + check_claude_auth() + assert "login" in str(exc_info.value).lower() + + @patch("agents.runner.subprocess.run") + def test_not_logged_in_case_insensitive(self, mock_run): + """Бросает ClaudeAuthError при 'not logged in' в любом регистре.""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "Error: NOT LOGGED IN to Claude" + mock.returncode = 1 + mock_run.return_value = mock + + with pytest.raises(ClaudeAuthError): + check_claude_auth() + + @patch("agents.runner.subprocess.run") + def test_not_logged_in_via_string_in_stderr(self, mock_run): + """Бросает ClaudeAuthError при 'Not logged in' в stderr.""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "Error: Not logged in to Claude" + mock.returncode = 1 + mock_run.return_value = mock + + with pytest.raises(ClaudeAuthError): + check_claude_auth() + + @patch("agents.runner.subprocess.run") + def test_not_logged_in_via_nonzero_returncode(self, mock_run): + """Бросает ClaudeAuthError при ненулевом returncode (без 'Not logged in' текста).""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "Some other error" + mock.returncode = 1 + mock_run.return_value = mock + + with pytest.raises(ClaudeAuthError): + check_claude_auth() + + @patch("agents.runner.subprocess.run") + def test_not_logged_in_via_is_error_in_json(self, mock_run): + """Бросает ClaudeAuthError при is_error=true в JSON даже с returncode=0.""" + mock = MagicMock() + mock.stdout = json.dumps({"is_error": True, "result": "authentication required"}) + mock.stderr = "" + mock.returncode = 0 + mock_run.return_value = mock + + with pytest.raises(ClaudeAuthError): + check_claude_auth() + + @patch("agents.runner.subprocess.run", side_effect=FileNotFoundError) + def test_raises_when_cli_not_found(self, mock_run): + """При FileNotFoundError бросает ClaudeAuthError с понятным сообщением.""" + with 
pytest.raises(ClaudeAuthError) as exc_info: + check_claude_auth() + assert "PATH" in str(exc_info.value) or "not found" in str(exc_info.value).lower() + + @patch("agents.runner.subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="claude", timeout=10)) + def test_ok_when_timeout(self, mock_run): + """При TimeoutExpired не бросает исключение (не блокируем на timeout).""" + check_claude_auth() # должна вернуть None без исключений diff --git a/tests/test_telegram.py b/tests/test_telegram.py new file mode 100644 index 0000000..a3110de --- /dev/null +++ b/tests/test_telegram.py @@ -0,0 +1,304 @@ +""" +Tests for core/telegram.py — send_telegram_escalation — KIN-BIZ-001. + +Covers: + - Correct Telegram API call parameters (token, chat_id, task_id, agent_role, reason) + - Graceful failure when Telegram API is unavailable (no exceptions raised) + - telegram_sent flag written to DB after successful send (mark_telegram_sent) +""" + +import json +import urllib.error +from unittest.mock import MagicMock, patch + +import pytest + +from core import models +from core.db import init_db +from core.telegram import send_telegram_escalation + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def db_conn(): + """Fresh in-memory DB for each test.""" + conn = init_db(db_path=":memory:") + yield conn + conn.close() + + +@pytest.fixture +def tg_env(monkeypatch): + """Inject Telegram credentials via env vars (bypass secrets file). + + Also stubs _load_kin_config so the secrets file doesn't override env vars. 
+ """ + monkeypatch.setenv("KIN_TG_BOT_TOKEN", "test-token-abc123") + monkeypatch.setenv("KIN_TG_CHAT_ID", "99887766") + monkeypatch.setattr("core.telegram._load_kin_config", lambda: {}) + + +@pytest.fixture +def mock_urlopen_ok(): + """Mock urllib.request.urlopen to return HTTP 200.""" + mock_resp = MagicMock() + mock_resp.status = 200 + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + with patch("urllib.request.urlopen", return_value=mock_resp) as m: + yield m + + +# --------------------------------------------------------------------------- +# Unit tests: send_telegram_escalation — correct API call parameters +# --------------------------------------------------------------------------- + +def test_send_telegram_escalation_url_contains_bot_token(tg_env, mock_urlopen_ok): + """Запрос уходит на URL с правильным bot token.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test Project", + agent_role="backend_dev", + reason="Cannot access DB", + pipeline_step="2", + ) + req = mock_urlopen_ok.call_args[0][0] + assert "test-token-abc123" in req.full_url + assert "sendMessage" in req.full_url + + +def test_send_telegram_escalation_sends_to_correct_chat_id(tg_env, mock_urlopen_ok): + """В теле POST-запроса содержится правильный chat_id.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Blocked", + pipeline_step="1", + ) + req = mock_urlopen_ok.call_args[0][0] + body = json.loads(req.data.decode()) + assert body["chat_id"] == "99887766" + + +def test_send_telegram_escalation_includes_task_id_in_message(tg_env, mock_urlopen_ok): + """task_id присутствует в тексте сообщения.""" + send_telegram_escalation( + task_id="KIN-TEST-007", + project_name="My Project", + agent_role="frontend_dev", + reason="No API access", + pipeline_step="3", + ) + req = mock_urlopen_ok.call_args[0][0] + body = json.loads(req.data.decode()) + assert "KIN-TEST-007" in body["text"] + + +def 
test_send_telegram_escalation_includes_agent_role_in_message(tg_env, mock_urlopen_ok): + """agent_role присутствует в тексте сообщения.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="sysadmin", + reason="SSH timeout", + pipeline_step="1", + ) + req = mock_urlopen_ok.call_args[0][0] + body = json.loads(req.data.decode()) + assert "sysadmin" in body["text"] + + +def test_send_telegram_escalation_includes_reason_in_message(tg_env, mock_urlopen_ok): + """reason присутствует в тексте сообщения.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Access denied to external API", + pipeline_step="2", + ) + req = mock_urlopen_ok.call_args[0][0] + body = json.loads(req.data.decode()) + assert "Access denied to external API" in body["text"] + + +def test_send_telegram_escalation_uses_post_method(tg_env, mock_urlopen_ok): + """Запрос отправляется методом POST.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + req = mock_urlopen_ok.call_args[0][0] + assert req.method == "POST" + + +def test_send_telegram_escalation_returns_true_on_success(tg_env, mock_urlopen_ok): + """Функция возвращает True при успешном ответе HTTP 200.""" + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is True + + +def test_send_telegram_escalation_includes_pipeline_step_in_message(tg_env, mock_urlopen_ok): + """pipeline_step включён в текст сообщения.""" + send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="debugger", + reason="Reason", + pipeline_step="5", + ) + req = mock_urlopen_ok.call_args[0][0] + body = json.loads(req.data.decode()) + assert "5" in body["text"] + + +# --------------------------------------------------------------------------- +# Graceful failure tests — Telegram 
API unavailable +# --------------------------------------------------------------------------- + +def test_send_telegram_escalation_returns_false_on_url_error(tg_env): + """Функция возвращает False (не бросает) при urllib.error.URLError.""" + with patch("urllib.request.urlopen", side_effect=urllib.error.URLError("Connection refused")): + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is False + + +def test_send_telegram_escalation_returns_false_on_unexpected_exception(tg_env): + """Функция возвращает False (не бросает) при неожиданной ошибке.""" + with patch("urllib.request.urlopen", side_effect=RuntimeError("Unexpected!")): + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is False + + +def test_send_telegram_escalation_never_raises_exception(tg_env): + """Функция никогда не бросает исключение — пайплайн не должен падать.""" + with patch("urllib.request.urlopen", side_effect=Exception("Anything at all")): + try: + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + except Exception as exc: + pytest.fail(f"send_telegram_escalation raised: {exc!r}") + assert result is False + + +def test_send_telegram_escalation_returns_false_on_http_non_200(tg_env): + """Функция возвращает False при HTTP ответе != 200.""" + mock_resp = MagicMock() + mock_resp.status = 403 + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + with patch("urllib.request.urlopen", return_value=mock_resp): + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is False + + +# --------------------------------------------------------------------------- +# 
Missing credentials tests +# --------------------------------------------------------------------------- + +def test_send_telegram_escalation_returns_false_when_no_bot_token(monkeypatch): + """Без bot token функция возвращает False, не падает.""" + monkeypatch.delenv("KIN_TG_BOT_TOKEN", raising=False) + monkeypatch.setenv("KIN_TG_CHAT_ID", "12345") + with patch("core.telegram._load_kin_config", return_value={}): + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is False + + +def test_send_telegram_escalation_returns_false_when_no_chat_id(monkeypatch): + """Без KIN_TG_CHAT_ID функция возвращает False, не падает.""" + monkeypatch.setenv("KIN_TG_BOT_TOKEN", "some-token") + monkeypatch.delenv("KIN_TG_CHAT_ID", raising=False) + with patch("core.telegram._load_kin_config", return_value={"tg_bot": "some-token"}): + result = send_telegram_escalation( + task_id="KIN-001", + project_name="Test", + agent_role="pm", + reason="Reason", + pipeline_step="1", + ) + assert result is False + + +# --------------------------------------------------------------------------- +# DB tests: mark_telegram_sent +# --------------------------------------------------------------------------- + +def test_mark_telegram_sent_sets_flag_in_db(db_conn): + """mark_telegram_sent() устанавливает telegram_sent=1 в БД.""" + models.create_project(db_conn, "proj1", "Project 1", "/proj1") + models.create_task(db_conn, "PROJ1-001", "proj1", "Task 1") + + task = models.get_task(db_conn, "PROJ1-001") + assert not bool(task.get("telegram_sent")) + + models.mark_telegram_sent(db_conn, "PROJ1-001") + + task = models.get_task(db_conn, "PROJ1-001") + assert bool(task["telegram_sent"]) is True + + +def test_mark_telegram_sent_does_not_affect_other_tasks(db_conn): + """mark_telegram_sent() обновляет только указанную задачу.""" + models.create_project(db_conn, "proj1", "Project 1", "/proj1") + 
models.create_task(db_conn, "PROJ1-001", "proj1", "Task 1") + models.create_task(db_conn, "PROJ1-002", "proj1", "Task 2") + + models.mark_telegram_sent(db_conn, "PROJ1-001") + + task2 = models.get_task(db_conn, "PROJ1-002") + assert not bool(task2.get("telegram_sent")) + + +def test_mark_telegram_sent_idempotent(db_conn): + """Повторный вызов mark_telegram_sent() не вызывает ошибок.""" + models.create_project(db_conn, "proj1", "Project 1", "/proj1") + models.create_task(db_conn, "PROJ1-001", "proj1", "Task 1") + + models.mark_telegram_sent(db_conn, "PROJ1-001") + models.mark_telegram_sent(db_conn, "PROJ1-001") # second call + + task = models.get_task(db_conn, "PROJ1-001") + assert bool(task["telegram_sent"]) is True diff --git a/web/api.py b/web/api.py index 367063c..62382bb 100644 --- a/web/api.py +++ b/web/api.py @@ -3,6 +3,9 @@ Kin Web API — FastAPI backend reading ~/.kin/kin.db via core.models. Run: uvicorn web.api:app --reload --port 8420 """ +import logging +import mimetypes +import shutil import subprocess import sys from pathlib import Path @@ -10,14 +13,15 @@ from pathlib import Path # Ensure project root on sys.path sys.path.insert(0, str(Path(__file__).parent.parent)) -from fastapi import FastAPI, HTTPException, Query +from fastapi import FastAPI, File, HTTPException, Query, UploadFile from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import JSONResponse, FileResponse +from fastapi.responses import JSONResponse, FileResponse, Response from fastapi.staticfiles import StaticFiles -from pydantic import BaseModel +from pydantic import BaseModel, model_validator from core.db import init_db from core import models +from core.models import VALID_COMPLETION_MODES, TASK_CATEGORIES from agents.bootstrap import ( detect_tech_stack, detect_modules, extract_decisions_from_claude_md, find_vault_root, scan_obsidian, save_to_db, @@ -25,6 +29,62 @@ from agents.bootstrap import ( DB_PATH = Path.home() / ".kin" / "kin.db" +_logger = 
logging.getLogger("kin") + +# --------------------------------------------------------------------------- +# Startup: verify claude CLI is available in PATH +# --------------------------------------------------------------------------- + +def _check_claude_available() -> None: + """Warn at startup if claude CLI cannot be found in PATH. + + launchctl daemons run with a stripped environment and may not see + /opt/homebrew/bin where claude is typically installed. + See Decision #28. + """ + from agents.runner import _build_claude_env # avoid circular import at module level + env = _build_claude_env() + claude_path = shutil.which("claude", path=env["PATH"]) + if claude_path: + _logger.info("claude CLI found: %s", claude_path) + else: + _logger.warning( + "WARNING: claude CLI not found in PATH (%s). " + "Agent pipelines will fail with returncode 127. " + "Fix: add /opt/homebrew/bin to EnvironmentVariables.PATH in " + "~/Library/LaunchAgents/com.kin.api.plist and reload with: " + "launchctl unload ~/Library/LaunchAgents/com.kin.api.plist && " + "launchctl load ~/Library/LaunchAgents/com.kin.api.plist", + env.get("PATH", ""), + ) + + +def _check_git_available() -> None: + """Warn at startup if git cannot be found in PATH. + + launchctl daemons run with a stripped environment and may not see + git in the standard directories. See Decision #28. + """ + from agents.runner import _build_claude_env # avoid circular import at module level + env = _build_claude_env() + git_path = shutil.which("git", path=env["PATH"]) + if git_path: + _logger.info("git found: %s", git_path) + else: + _logger.warning( + "WARNING: git not found in PATH (%s). " + "Autocommit will fail silently. 
" + "Fix: add git directory to EnvironmentVariables.PATH in " + "~/Library/LaunchAgents/com.kin.api.plist and reload with: " + "launchctl unload ~/Library/LaunchAgents/com.kin.api.plist && " + "launchctl load ~/Library/LaunchAgents/com.kin.api.plist", + env.get("PATH", ""), + ) + + +_check_claude_available() +_check_git_available() + app = FastAPI(title="Kin API", version="0.1.0") app.add_middleware( @@ -39,6 +99,32 @@ def get_conn(): return init_db(DB_PATH) +def _launch_pipeline_subprocess(task_id: str) -> None: + """Spawn `cli.main run {task_id}` in a detached background subprocess. + + Used by auto-trigger (label 'auto') and revise endpoint. + Never raises — subprocess errors are logged only. + """ + import os + kin_root = Path(__file__).parent.parent + cmd = [sys.executable, "-m", "cli.main", "--db", str(DB_PATH), "run", task_id] + cmd.append("--allow-write") + env = os.environ.copy() + env["KIN_NONINTERACTIVE"] = "1" + try: + proc = subprocess.Popen( + cmd, + cwd=str(kin_root), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + stdin=subprocess.DEVNULL, + env=env, + ) + _logger.info("Auto-triggered pipeline for %s, pid=%d", task_id, proc.pid) + except Exception as exc: + _logger.warning("Failed to launch pipeline for %s: %s", task_id, exc) + + # --------------------------------------------------------------------------- # Projects # --------------------------------------------------------------------------- @@ -53,6 +139,44 @@ def list_projects(status: str | None = None): return summary +class NewProjectCreate(BaseModel): + id: str + name: str + path: str | None = None + description: str + roles: list[str] + tech_stack: list[str] | None = None + priority: int = 5 + language: str = "ru" + + +@app.post("/api/projects/new") +def new_project_with_phases(body: NewProjectCreate): + """Create project + sequential research phases (KIN-059).""" + from core.phases import create_project_with_phases, validate_roles + clean_roles = validate_roles(body.roles) + if 
not clean_roles: + raise HTTPException(400, "At least one research role must be selected (excluding architect)") + conn = get_conn() + if models.get_project(conn, body.id): + conn.close() + raise HTTPException(409, f"Project '{body.id}' already exists") + try: + result = create_project_with_phases( + conn, body.id, body.name, body.path, + description=body.description, + selected_roles=clean_roles, + tech_stack=body.tech_stack, + priority=body.priority, + language=body.language, + ) + except ValueError as e: + conn.close() + raise HTTPException(400, str(e)) + conn.close() + return result + + @app.get("/api/projects/{project_id}") def get_project(project_id: str): conn = get_conn() @@ -67,36 +191,168 @@ def get_project(project_id: str): return {**p, "tasks": tasks, "modules": mods, "decisions": decisions} +VALID_PROJECT_TYPES = {"development", "operations", "research"} + + class ProjectCreate(BaseModel): id: str name: str - path: str + path: str | None = None tech_stack: list[str] | None = None status: str = "active" priority: int = 5 + project_type: str = "development" + ssh_host: str | None = None + ssh_user: str | None = None + ssh_key_path: str | None = None + ssh_proxy_jump: str | None = None + + @model_validator(mode="after") + def validate_fields(self) -> "ProjectCreate": + if self.project_type == "operations" and not self.ssh_host: + raise ValueError("ssh_host is required for operations projects") + if self.project_type != "operations" and not self.path: + raise ValueError("path is required for non-operations projects") + return self class ProjectPatch(BaseModel): - execution_mode: str + execution_mode: str | None = None + autocommit_enabled: bool | None = None + auto_test_enabled: bool | None = None + obsidian_vault_path: str | None = None + deploy_command: str | None = None + project_type: str | None = None + ssh_host: str | None = None + ssh_user: str | None = None + ssh_key_path: str | None = None + ssh_proxy_jump: str | None = None 
@app.patch("/api/projects/{project_id}") def patch_project(project_id: str, body: ProjectPatch): - if body.execution_mode not in VALID_EXECUTION_MODES: + has_any = any([ + body.execution_mode, body.autocommit_enabled is not None, + body.auto_test_enabled is not None, + body.obsidian_vault_path, body.deploy_command is not None, + body.project_type, body.ssh_host is not None, + body.ssh_user is not None, body.ssh_key_path is not None, + body.ssh_proxy_jump is not None, + ]) + if not has_any: + raise HTTPException(400, "Nothing to update.") + if body.execution_mode is not None and body.execution_mode not in VALID_EXECUTION_MODES: raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. Must be one of: {', '.join(VALID_EXECUTION_MODES)}") + if body.project_type is not None and body.project_type not in VALID_PROJECT_TYPES: + raise HTTPException(400, f"Invalid project_type '{body.project_type}'. Must be one of: {', '.join(VALID_PROJECT_TYPES)}") conn = get_conn() p = models.get_project(conn, project_id) if not p: conn.close() raise HTTPException(404, f"Project '{project_id}' not found") - models.update_project(conn, project_id, execution_mode=body.execution_mode) + fields = {} + if body.execution_mode is not None: + fields["execution_mode"] = body.execution_mode + if body.autocommit_enabled is not None: + fields["autocommit_enabled"] = int(body.autocommit_enabled) + if body.auto_test_enabled is not None: + fields["auto_test_enabled"] = int(body.auto_test_enabled) + if body.obsidian_vault_path is not None: + fields["obsidian_vault_path"] = body.obsidian_vault_path + if body.deploy_command is not None: + # Empty string = sentinel for clearing (decision #68) + fields["deploy_command"] = None if body.deploy_command == "" else body.deploy_command + if body.project_type is not None: + fields["project_type"] = body.project_type + if body.ssh_host is not None: + fields["ssh_host"] = body.ssh_host + if body.ssh_user is not None: + fields["ssh_user"] = 
body.ssh_user + if body.ssh_key_path is not None: + fields["ssh_key_path"] = body.ssh_key_path + if body.ssh_proxy_jump is not None: + fields["ssh_proxy_jump"] = body.ssh_proxy_jump + models.update_project(conn, project_id, **fields) p = models.get_project(conn, project_id) conn.close() return p +@app.delete("/api/projects/{project_id}", status_code=204) +def delete_project(project_id: str): + """Delete a project and all its related data (tasks, decisions, phases, logs).""" + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + models.delete_project(conn, project_id) + conn.close() + return Response(status_code=204) + + +@app.post("/api/projects/{project_id}/sync/obsidian") +def sync_obsidian_endpoint(project_id: str): + """Запускает двусторонний Obsidian sync для проекта.""" + from core.obsidian_sync import sync_obsidian + + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + if not p.get("obsidian_vault_path"): + conn.close() + raise HTTPException(400, "obsidian_vault_path not set for this project") + result = sync_obsidian(conn, project_id) + conn.close() + return result + + +@app.post("/api/projects/{project_id}/deploy") +def deploy_project(project_id: str): + """Execute deploy_command for a project. Returns stdout/stderr/exit_code. + + # WARNING: shell=True — deploy_command is admin-only, set in Settings by the project owner. 
+ """ + import time + conn = get_conn() + p = models.get_project(conn, project_id) + conn.close() + if not p: + raise HTTPException(404, f"Project '{project_id}' not found") + deploy_command = p.get("deploy_command") + if not deploy_command: + raise HTTPException(400, "deploy_command not set for this project") + cwd = p.get("path") or None + start = time.monotonic() + try: + result = subprocess.run( + deploy_command, + shell=True, # WARNING: shell=True — command is admin-only + cwd=cwd, + capture_output=True, + text=True, + timeout=60, + ) + except subprocess.TimeoutExpired: + raise HTTPException(504, "Deploy command timed out after 60 seconds") + except Exception as e: + raise HTTPException(500, f"Deploy failed: {e}") + duration = round(time.monotonic() - start, 2) + return { + "success": result.returncode == 0, + "exit_code": result.returncode, + "stdout": result.stdout, + "stderr": result.stderr, + "duration_seconds": duration, + } + + @app.post("/api/projects") def create_project(body: ProjectCreate): + if body.project_type not in VALID_PROJECT_TYPES: + raise HTTPException(400, f"Invalid project_type '{body.project_type}'. 
Must be one of: {', '.join(VALID_PROJECT_TYPES)}") conn = get_conn() if models.get_project(conn, body.id): conn.close() @@ -104,11 +360,182 @@ def create_project(body: ProjectCreate): p = models.create_project( conn, body.id, body.name, body.path, tech_stack=body.tech_stack, status=body.status, priority=body.priority, + project_type=body.project_type, + ssh_host=body.ssh_host, + ssh_user=body.ssh_user, + ssh_key_path=body.ssh_key_path, + ssh_proxy_jump=body.ssh_proxy_jump, ) conn.close() return p +# --------------------------------------------------------------------------- +# Phases (KIN-059) +# --------------------------------------------------------------------------- + +@app.get("/api/projects/{project_id}/phases") +def get_project_phases(project_id: str): + """List research phases for a project, with task data joined.""" + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + phases = models.list_phases(conn, project_id) + result = [] + for phase in phases: + task = models.get_task(conn, phase["task_id"]) if phase.get("task_id") else None + result.append({**phase, "task": task}) + conn.close() + return result + + +class PhaseApprove(BaseModel): + comment: str | None = None + + +class PhaseReject(BaseModel): + reason: str + + +class PhaseRevise(BaseModel): + comment: str + + +@app.post("/api/phases/{phase_id}/approve") +def approve_phase(phase_id: int, body: PhaseApprove | None = None): + """Approve a research phase and activate the next one.""" + from core.phases import approve_phase as _approve + conn = get_conn() + phase = models.get_phase(conn, phase_id) + if not phase: + conn.close() + raise HTTPException(404, f"Phase {phase_id} not found") + try: + result = _approve(conn, phase_id) + except ValueError as e: + conn.close() + raise HTTPException(400, str(e)) + # Mark the phase's task as done for consistency + if phase.get("task_id"): + 
models.update_task(conn, phase["task_id"], status="done") + conn.close() + return result + + +@app.post("/api/phases/{phase_id}/reject") +def reject_phase(phase_id: int, body: PhaseReject): + """Reject a research phase.""" + from core.phases import reject_phase as _reject + conn = get_conn() + phase = models.get_phase(conn, phase_id) + if not phase: + conn.close() + raise HTTPException(404, f"Phase {phase_id} not found") + try: + result = _reject(conn, phase_id, body.reason) + except ValueError as e: + conn.close() + raise HTTPException(400, str(e)) + conn.close() + return result + + +@app.post("/api/phases/{phase_id}/revise") +def revise_phase(phase_id: int, body: PhaseRevise): + """Request revision for a research phase.""" + from core.phases import revise_phase as _revise + if not body.comment.strip(): + raise HTTPException(400, "comment is required") + conn = get_conn() + phase = models.get_phase(conn, phase_id) + if not phase: + conn.close() + raise HTTPException(404, f"Phase {phase_id} not found") + try: + result = _revise(conn, phase_id, body.comment) + except ValueError as e: + conn.close() + raise HTTPException(400, str(e)) + conn.close() + return result + + +@app.post("/api/projects/{project_id}/phases/start") +def start_project_phase(project_id: str): + """Launch agent for the current active/revising phase in background. Returns 202. + + Finds the first phase with status 'active' or 'revising', sets its task to + in_progress, spawns a background subprocess (same as /api/tasks/{id}/run), + and returns immediately so the HTTP request doesn't block on agent execution. 
+ """ + from agents.runner import check_claude_auth, ClaudeAuthError + try: + check_claude_auth() + except ClaudeAuthError: + raise HTTPException(503, detail={ + "error": "claude_auth_required", + "message": "Claude CLI requires login", + "instructions": "Run: claude login", + }) + + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + + phases = models.list_phases(conn, project_id) + active_phase = next( + (ph for ph in phases if ph["status"] in ("active", "revising")), None + ) + if not active_phase: + conn.close() + raise HTTPException(404, f"No active or revising phase for project '{project_id}'") + + task_id = active_phase.get("task_id") + if not task_id: + conn.close() + raise HTTPException(400, f"Phase {active_phase['id']} has no task assigned") + + t = models.get_task(conn, task_id) + if not t: + conn.close() + raise HTTPException(404, f"Task '{task_id}' not found") + + models.update_task(conn, task_id, status="in_progress") + conn.close() + + kin_root = Path(__file__).parent.parent + cmd = [sys.executable, "-m", "cli.main", "--db", str(DB_PATH), + "run", task_id] + cmd.append("--allow-write") # always required: subprocess runs non-interactively (stdin=DEVNULL) + + import os + env = os.environ.copy() + env["KIN_NONINTERACTIVE"] = "1" + + try: + proc = subprocess.Popen( + cmd, + cwd=str(kin_root), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + stdin=subprocess.DEVNULL, + env=env, + ) + _logger.info("Phase agent started for task %s (phase %d), pid=%d", + task_id, active_phase["id"], proc.pid) + except Exception as e: + raise HTTPException(500, f"Failed to start phase agent: {e}") + + return JSONResponse( + {"status": "started", "phase_id": active_phase["id"], "task_id": task_id}, + status_code=202, + ) + + # --------------------------------------------------------------------------- # Tasks # 
--------------------------------------------------------------------------- @@ -128,6 +555,9 @@ class TaskCreate(BaseModel): title: str priority: int = 5 route_type: str | None = None + category: str | None = None + acceptance_criteria: str | None = None + labels: list[str] | None = None @app.post("/api/tasks") @@ -137,32 +567,42 @@ def create_task(body: TaskCreate): if not p: conn.close() raise HTTPException(404, f"Project '{body.project_id}' not found") - # Auto-generate task ID - existing = models.list_tasks(conn, project_id=body.project_id) - prefix = body.project_id.upper() - max_num = 0 - for t in existing: - if t["id"].startswith(prefix + "-"): - try: - num = int(t["id"].split("-", 1)[1]) - max_num = max(max_num, num) - except ValueError: - pass - task_id = f"{prefix}-{max_num + 1:03d}" + category = None + if body.category: + category = body.category.upper() + if category not in TASK_CATEGORIES: + conn.close() + raise HTTPException(400, f"Invalid category '{category}'. Must be one of: {', '.join(TASK_CATEGORIES)}") + task_id = models.next_task_id(conn, body.project_id, category=category) brief = {"route_type": body.route_type} if body.route_type else None t = models.create_task(conn, task_id, body.project_id, body.title, - priority=body.priority, brief=brief) + priority=body.priority, brief=brief, category=category, + acceptance_criteria=body.acceptance_criteria, + labels=body.labels) conn.close() + + # Auto-trigger: if task has 'auto' label, launch pipeline in background + if body.labels and "auto" in body.labels: + _launch_pipeline_subprocess(task_id) + return t +VALID_ROUTE_TYPES = {"debug", "feature", "refactor", "hotfix"} + + class TaskPatch(BaseModel): status: str | None = None execution_mode: str | None = None + priority: int | None = None + route_type: str | None = None + title: str | None = None + brief_text: str | None = None + acceptance_criteria: str | None = None VALID_STATUSES = set(models.VALID_TASK_STATUSES) -VALID_EXECUTION_MODES = {"auto", 
"review"} +VALID_EXECUTION_MODES = VALID_COMPLETION_MODES @app.patch("/api/tasks/{task_id}") @@ -171,8 +611,15 @@ def patch_task(task_id: str, body: TaskPatch): raise HTTPException(400, f"Invalid status '{body.status}'. Must be one of: {', '.join(VALID_STATUSES)}") if body.execution_mode is not None and body.execution_mode not in VALID_EXECUTION_MODES: raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. Must be one of: {', '.join(VALID_EXECUTION_MODES)}") - if body.status is None and body.execution_mode is None: - raise HTTPException(400, "Nothing to update. Provide status or execution_mode.") + if body.priority is not None and not (1 <= body.priority <= 10): + raise HTTPException(400, "priority must be between 1 and 10") + if body.route_type is not None and body.route_type and body.route_type not in VALID_ROUTE_TYPES: + raise HTTPException(400, f"Invalid route_type '{body.route_type}'. Must be one of: {', '.join(sorted(VALID_ROUTE_TYPES))} or empty string to clear") + if body.title is not None and not body.title.strip(): + raise HTTPException(400, "title must not be empty") + all_none = all(v is None for v in [body.status, body.execution_mode, body.priority, body.route_type, body.title, body.brief_text, body.acceptance_criteria]) + if all_none: + raise HTTPException(400, "Nothing to update.") conn = get_conn() t = models.get_task(conn, task_id) if not t: @@ -183,6 +630,24 @@ def patch_task(task_id: str, body: TaskPatch): fields["status"] = body.status if body.execution_mode is not None: fields["execution_mode"] = body.execution_mode + if body.priority is not None: + fields["priority"] = body.priority + if body.title is not None: + fields["title"] = body.title.strip() + if body.route_type is not None or body.brief_text is not None: + current_brief = t.get("brief") or {} + if isinstance(current_brief, str): + current_brief = {"text": current_brief} + if body.route_type is not None: + if body.route_type: + current_brief = {**current_brief, 
"route_type": body.route_type} + else: + current_brief = {k: v for k, v in current_brief.items() if k != "route_type"} + if body.brief_text is not None: + current_brief = {**current_brief, "text": body.brief_text} + fields["brief"] = current_brief if current_brief else None + if body.acceptance_criteria is not None: + fields["acceptance_criteria"] = body.acceptance_criteria models.update_task(conn, task_id, **fields) t = models.get_task(conn, task_id) conn.close() @@ -226,8 +691,10 @@ def get_task_full(task_id: str): decisions = models.get_decisions(conn, t["project_id"]) # Filter to decisions linked to this task task_decisions = [d for d in decisions if d.get("task_id") == task_id] + p = models.get_project(conn, t["project_id"]) + project_deploy_command = p.get("deploy_command") if p else None conn.close() - return {**t, "pipeline_steps": steps, "related_decisions": task_decisions} + return {**t, "pipeline_steps": steps, "related_decisions": task_decisions, "project_deploy_command": project_deploy_command} class TaskApprove(BaseModel): @@ -255,6 +722,21 @@ def approve_task(task_id: str, body: TaskApprove | None = None): event="task_done", task_modules=task_modules) except Exception: pass + + # Advance phase state machine if this task belongs to an active phase + phase_result = None + phase_row = conn.execute( + "SELECT * FROM project_phases WHERE task_id = ?", (task_id,) + ).fetchone() + if phase_row: + phase = dict(phase_row) + if phase.get("status") == "active": + from core.phases import approve_phase as _approve_phase + try: + phase_result = _approve_phase(conn, phase["id"]) + except ValueError: + pass + decision = None if body and body.decision_title: decision = models.add_decision( @@ -275,6 +757,7 @@ def approve_task(task_id: str, body: TaskApprove | None = None): "followup_tasks": followup_tasks, "needs_decision": len(pending_actions) > 0, "pending_actions": pending_actions, + "phase": phase_result, } @@ -317,6 +800,68 @@ def reject_task(task_id: str, body: 
TaskReject): return {"status": "pending", "reason": body.reason} +_MAX_REVISE_COUNT = 5 + + +class TaskRevise(BaseModel): + comment: str + steps: list[dict] | None = None # override pipeline steps (optional) + target_role: str | None = None # if set, re-run only [target_role, reviewer] instead of full pipeline + + +@app.post("/api/tasks/{task_id}/revise") +def revise_task(task_id: str, body: TaskRevise): + """Revise a task: update comment, increment revise_count, and re-run pipeline.""" + if not body.comment.strip(): + raise HTTPException(400, "comment must not be empty") + + conn = get_conn() + t = models.get_task(conn, task_id) + if not t: + conn.close() + raise HTTPException(404, f"Task '{task_id}' not found") + + revise_count = (t.get("revise_count") or 0) + 1 + if revise_count > _MAX_REVISE_COUNT: + conn.close() + raise HTTPException(400, f"Max revisions ({_MAX_REVISE_COUNT}) reached for this task") + + models.update_task( + conn, task_id, + status="in_progress", + revise_comment=body.comment, + revise_count=revise_count, + revise_target_role=body.target_role, + ) + + # Resolve steps: explicit > target_role shortcut > last pipeline steps + steps = body.steps + if not steps: + if body.target_role: + steps = [{"role": body.target_role}, {"role": "reviewer"}] + else: + row = conn.execute( + "SELECT steps FROM pipelines WHERE task_id = ? 
ORDER BY id DESC LIMIT 1", + (task_id,), + ).fetchone() + if row: + import json as _json + raw = row["steps"] + steps = _json.loads(raw) if isinstance(raw, str) else raw + + conn.close() + + # Launch pipeline in background subprocess + _launch_pipeline_subprocess(task_id) + + return { + "status": "in_progress", + "comment": body.comment, + "revise_count": revise_count, + "pipeline_steps": steps, + } + + @app.get("/api/tasks/{task_id}/running") def is_task_running(task_id: str): """Check if task has an active (running) pipeline.""" @@ -338,11 +883,24 @@ def is_task_running(task_id: str): @app.post("/api/tasks/{task_id}/run") def run_task(task_id: str): """Launch pipeline for a task in background. Returns 202.""" + from agents.runner import check_claude_auth, ClaudeAuthError + try: + check_claude_auth() + except ClaudeAuthError: + raise HTTPException(503, detail={ + "error": "claude_auth_required", + "message": "Claude CLI requires login", + "instructions": "Run: claude login", + }) + conn = get_conn() t = models.get_task(conn, task_id) if not t: conn.close() raise HTTPException(404, f"Task '{task_id}' not found") + if t.get("status") == "in_progress": + conn.close() + return JSONResponse({"error": "task_already_running"}, status_code=409) # Set task to in_progress immediately so UI updates models.update_task(conn, task_id, status="in_progress") conn.close() @@ -361,6 +919,7 @@ def run_task(task_id: str): cmd, cwd=str(kin_root), stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL, env=env, ) @@ -526,8 +1085,14 @@ def bootstrap(body: BootstrapRequest): if obs["tasks"] or obs["decisions"]: obsidian = obs - save_to_db(conn, body.id, body.name, str(project_path), - tech_stack, modules, decisions, obsidian) + try: + save_to_db(conn, body.id, body.name, str(project_path), + tech_stack, modules, decisions, obsidian) + except Exception as e: + if models.get_project(conn, body.id): + models.delete_project(conn, body.id) + conn.close() + raise 
HTTPException(500, f"Bootstrap failed: {e}") p = models.get_project(conn, body.id) conn.close() return { @@ -538,6 +1103,559 @@ def bootstrap(body: BootstrapRequest): } +# --------------------------------------------------------------------------- +# Environments (KIN-087) +# --------------------------------------------------------------------------- + +VALID_AUTH_TYPES = {"password", "key", "ssh_key"} + + +class EnvironmentCreate(BaseModel): + name: str + host: str + port: int = 22 + username: str + auth_type: str = "password" + auth_value: str | None = None + is_installed: bool = False + + @model_validator(mode="after") + def validate_env_fields(self) -> "EnvironmentCreate": + if not self.name.strip(): + raise ValueError("name must not be empty") + if not self.host.strip(): + raise ValueError("host must not be empty") + if not self.username.strip(): + raise ValueError("username must not be empty") + if self.auth_type not in VALID_AUTH_TYPES: + raise ValueError(f"auth_type must be one of: {', '.join(VALID_AUTH_TYPES)}") + if not (1 <= self.port <= 65535): + raise ValueError("port must be between 1 and 65535") + return self + + +class EnvironmentPatch(BaseModel): + name: str | None = None + host: str | None = None + port: int | None = None + username: str | None = None + auth_type: str | None = None + auth_value: str | None = None + is_installed: bool | None = None + + +def _trigger_sysadmin_scan(conn, project_id: str, env: dict) -> str: + """Create a sysadmin env-scan task and launch it in background. + + env must be the raw record from get_environment() (contains obfuscated auth_value). + Guard: skips if an active sysadmin task for this environment already exists. + Returns task_id of the created (or existing) task. + """ + env_id = env["id"] + existing = conn.execute( + """SELECT id FROM tasks + WHERE project_id = ? 
AND assigned_role = 'sysadmin' + AND status NOT IN ('done', 'cancelled') + AND brief LIKE ?""", + (project_id, f'%"env_id": {env_id}%'), + ).fetchone() + if existing: + return existing["id"] + + task_id = models.next_task_id(conn, project_id, category="INFRA") + brief = { + "type": "env_scan", + "env_id": env_id, + "host": env["host"], + "port": env["port"], + "username": env["username"], + "auth_type": env["auth_type"], + # auth_value is decrypted plaintext (get_environment decrypts via _decrypt_auth). + # Stored in tasks.brief — treat as sensitive. + "auth_value_b64": env.get("auth_value"), + "text": ( + f"Провести полный аудит среды '{env['name']}' на сервере {env['host']}.\n\n" + f"Подключение: {env['username']}@{env['host']}:{env['port']} (auth_type={env['auth_type']}).\n\n" + "Задачи:\n" + "1. Проверить git config (user, remote, текущую ветку)\n" + "2. Установленный стек (python/node/java версии, package managers)\n" + "3. Переменные окружения (.env файлы, systemd EnvironmentFile)\n" + "4. Nginx/caddy конфиги (виртуальные хосты, SSL)\n" + "5. Systemd/supervisor сервисы проекта\n" + "6. SSH-ключи (authorized_keys, known_hosts)\n" + "7. Если чего-то не хватает для подключения или аудита — эскалация к человеку." 
+ ), + } + models.create_task( + conn, task_id, project_id, + title=f"[{env['name']}] Env scan: {env['host']}", + assigned_role="sysadmin", + category="INFRA", + brief=brief, + ) + models.update_task(conn, task_id, status="in_progress") + + kin_root = Path(__file__).parent.parent + cmd = [sys.executable, "-m", "cli.main", "--db", str(DB_PATH), "run", task_id] + cmd.append("--allow-write") + import os as _os + env_vars = _os.environ.copy() + env_vars["KIN_NONINTERACTIVE"] = "1" + try: + subprocess.Popen( + cmd, + cwd=str(kin_root), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + stdin=subprocess.DEVNULL, + env=env_vars, + ) + except Exception as e: + _logger.warning("Failed to start sysadmin scan for %s: %s", task_id, e) + + return task_id + + +@app.get("/api/projects/{project_id}/environments") +def list_environments(project_id: str): + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + envs = models.list_environments(conn, project_id) + conn.close() + return envs + + +@app.post("/api/projects/{project_id}/environments", status_code=201) +def create_environment(project_id: str, body: EnvironmentCreate): + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + try: + env = models.create_environment( + conn, project_id, + name=body.name, + host=body.host, + port=body.port, + username=body.username, + auth_type=body.auth_type, + auth_value=body.auth_value, + is_installed=body.is_installed, + ) + except Exception as e: + conn.close() + if "UNIQUE constraint" in str(e): + raise HTTPException(409, f"Environment '{body.name}' already exists for this project") + if "KIN_SECRET_KEY" in str(e): + raise HTTPException(503, "Server misconfiguration: KIN_SECRET_KEY is not set. 
Contact admin.") + if isinstance(e, ModuleNotFoundError) or "cryptography" in str(e) or "No module named" in str(e): + raise HTTPException(503, "Server misconfiguration: cryptography package not installed. Run: python3.11 -m pip install cryptography") + raise HTTPException(500, str(e)) + scan_task_id = None + if body.is_installed: + raw_env = models.get_environment(conn, env["id"]) + scan_task_id = _trigger_sysadmin_scan(conn, project_id, raw_env) + conn.close() + result = {**env} + if scan_task_id: + result["scan_task_id"] = scan_task_id + return JSONResponse(result, status_code=201) + + +@app.patch("/api/projects/{project_id}/environments/{env_id}") +def patch_environment(project_id: str, env_id: int, body: EnvironmentPatch): + all_none = all(v is None for v in [ + body.name, body.host, body.port, body.username, + body.auth_type, body.auth_value, body.is_installed, + ]) + if all_none: + raise HTTPException(400, "Nothing to update.") + if body.auth_type is not None and body.auth_type not in VALID_AUTH_TYPES: + raise HTTPException(400, f"auth_type must be one of: {', '.join(VALID_AUTH_TYPES)}") + if body.port is not None and not (1 <= body.port <= 65535): + raise HTTPException(400, "port must be between 1 and 65535") + if body.name is not None and not body.name.strip(): + raise HTTPException(400, "name must not be empty") + if body.username is not None and not body.username.strip(): + raise HTTPException(400, "username must not be empty") + if body.host is not None and not body.host.strip(): + raise HTTPException(400, "host must not be empty") + + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + existing = models.get_environment(conn, env_id) + if not existing or existing.get("project_id") != project_id: + conn.close() + raise HTTPException(404, f"Environment #{env_id} not found") + + was_installed = bool(existing.get("is_installed")) + + fields = {} + if 
body.name is not None: + fields["name"] = body.name + if body.host is not None: + fields["host"] = body.host + if body.port is not None: + fields["port"] = body.port + if body.username is not None: + fields["username"] = body.username + if body.auth_type is not None: + fields["auth_type"] = body.auth_type + if body.auth_value: # only update if non-empty (empty = don't change stored cred) + fields["auth_value"] = body.auth_value + if body.is_installed is not None: + fields["is_installed"] = int(body.is_installed) + + try: + updated = models.update_environment(conn, env_id, **fields) + except Exception as e: + conn.close() + if "UNIQUE constraint" in str(e): + raise HTTPException(409, f"Environment name already exists for this project") + if "KIN_SECRET_KEY" in str(e): + raise HTTPException(503, "Server misconfiguration: KIN_SECRET_KEY is not set. Contact admin.") + if isinstance(e, ModuleNotFoundError) or "cryptography" in str(e) or "No module named" in str(e): + raise HTTPException(503, "Server misconfiguration: cryptography package not installed. 
Run: python3.11 -m pip install cryptography") + raise HTTPException(500, str(e)) + + scan_task_id = None + if body.is_installed is True and not was_installed: + raw_env = models.get_environment(conn, env_id) + scan_task_id = _trigger_sysadmin_scan(conn, project_id, raw_env) + + conn.close() + result = {**updated} + if scan_task_id: + result["scan_task_id"] = scan_task_id + return result + + +@app.delete("/api/projects/{project_id}/environments/{env_id}", status_code=204) +def delete_environment(project_id: str, env_id: int): + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + # Check existence directly — no decryption needed for delete + row = conn.execute( + "SELECT project_id FROM project_environments WHERE id = ?", (env_id,) + ).fetchone() + if not row or dict(row)["project_id"] != project_id: + conn.close() + raise HTTPException(404, f"Environment #{env_id} not found") + models.delete_environment(conn, env_id) + conn.close() + return Response(status_code=204) + + +@app.post("/api/projects/{project_id}/environments/{env_id}/scan", status_code=202) +def scan_environment(project_id: str, env_id: int): + """Manually re-trigger sysadmin env scan for an environment.""" + import os as _os + if not _os.environ.get("KIN_SECRET_KEY"): + raise HTTPException(503, "Server misconfiguration: KIN_SECRET_KEY is not set. 
Contact admin.") + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + raw_env = models.get_environment(conn, env_id) + if not raw_env or raw_env.get("project_id") != project_id: + conn.close() + raise HTTPException(404, f"Environment #{env_id} not found") + task_id = _trigger_sysadmin_scan(conn, project_id, raw_env) + conn.close() + return JSONResponse({"status": "started", "task_id": task_id}, status_code=202) + + +# --------------------------------------------------------------------------- +# Notifications (escalations from blocked agents) +# --------------------------------------------------------------------------- + +@app.get("/api/notifications") +def get_notifications(project_id: str | None = None): + """Return tasks with status='blocked' as escalation notifications. + + Each item includes task details, the agent role that blocked it, + the reason, the pipeline step, and whether a Telegram alert was sent. + Intended for GUI polling (5s interval). + """ + conn = get_conn() + query = "SELECT * FROM tasks WHERE status = 'blocked'" + params: list = [] + if project_id: + query += " AND project_id = ?" 
+ params.append(project_id) + query += " ORDER BY blocked_at DESC, updated_at DESC" + rows = conn.execute(query, params).fetchall() + conn.close() + + notifications = [] + for row in rows: + t = dict(row) + notifications.append({ + "task_id": t["id"], + "project_id": t["project_id"], + "title": t.get("title"), + "agent_role": t.get("blocked_agent_role"), + "reason": t.get("blocked_reason"), + "pipeline_step": t.get("blocked_pipeline_step"), + "blocked_at": t.get("blocked_at") or t.get("updated_at"), + "telegram_sent": bool(t.get("telegram_sent")), + }) + return notifications + + +# --------------------------------------------------------------------------- +# Attachments (KIN-090) +# --------------------------------------------------------------------------- + +_MAX_ATTACHMENT_SIZE = 10 * 1024 * 1024 # 10 MB + + +def _attachment_dir(project_path: Path, task_id: str) -> Path: + """Return (and create) {project_path}/.kin/attachments/{task_id}/.""" + d = project_path / ".kin" / "attachments" / task_id + d.mkdir(parents=True, exist_ok=True) + return d + + +@app.post("/api/tasks/{task_id}/attachments", status_code=201) +async def upload_attachment(task_id: str, file: UploadFile = File(...)): + conn = get_conn() + t = models.get_task(conn, task_id) + if not t: + conn.close() + raise HTTPException(404, f"Task '{task_id}' not found") + p = models.get_project(conn, t["project_id"]) + if not p or not p.get("path"): + conn.close() + raise HTTPException(400, "Attachments not supported for operations projects") + + # Sanitize filename: strip directory components + safe_name = Path(file.filename or "upload").name + if not safe_name: + conn.close() + raise HTTPException(400, "Invalid filename") + + att_dir = _attachment_dir(Path(p["path"]), task_id) + dest = att_dir / safe_name + + # Path traversal guard + if not dest.is_relative_to(att_dir): + conn.close() + raise HTTPException(400, "Invalid filename") + + # Read with size limit + content = await file.read(_MAX_ATTACHMENT_SIZE + 
1) + if len(content) > _MAX_ATTACHMENT_SIZE: + conn.close() + raise HTTPException(413, f"File too large. Maximum size is {_MAX_ATTACHMENT_SIZE // (1024*1024)} MB") + + dest.write_bytes(content) + + mime_type = mimetypes.guess_type(safe_name)[0] or "application/octet-stream" + attachment = models.create_attachment( + conn, task_id, + filename=safe_name, + path=str(dest), + mime_type=mime_type, + size=len(content), + ) + conn.close() + return JSONResponse(attachment, status_code=201) + + +@app.get("/api/tasks/{task_id}/attachments") +def list_task_attachments(task_id: str): + conn = get_conn() + t = models.get_task(conn, task_id) + if not t: + conn.close() + raise HTTPException(404, f"Task '{task_id}' not found") + attachments = models.list_attachments(conn, task_id) + conn.close() + return attachments + + +@app.delete("/api/tasks/{task_id}/attachments/{attachment_id}", status_code=204) +def delete_task_attachment(task_id: str, attachment_id: int): + conn = get_conn() + att = models.get_attachment(conn, attachment_id) + if not att or att["task_id"] != task_id: + conn.close() + raise HTTPException(404, f"Attachment #{attachment_id} not found") + # Delete file from disk + try: + Path(att["path"]).unlink(missing_ok=True) + except Exception: + pass + models.delete_attachment(conn, attachment_id) + conn.close() + return Response(status_code=204) + + +@app.get("/api/attachments/{attachment_id}/file") +def get_attachment_file(attachment_id: int): + conn = get_conn() + att = models.get_attachment(conn, attachment_id) + conn.close() + if not att: + raise HTTPException(404, f"Attachment #{attachment_id} not found") + file_path = Path(att["path"]) + if not file_path.exists(): + raise HTTPException(404, "Attachment file not found on disk") + return FileResponse( + str(file_path), + media_type=att["mime_type"], + filename=att["filename"], + ) + + +# --------------------------------------------------------------------------- +# Chat (KIN-OBS-012) +# 
--------------------------------------------------------------------------- + +class ChatMessageIn(BaseModel): + content: str + + +@app.get("/api/projects/{project_id}/chat") +def get_chat_history( + project_id: str, + limit: int = Query(50, ge=1, le=200), + before_id: int | None = None, +): + """Return chat history for a project. Enriches task_created messages with task_stub.""" + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + messages = models.get_chat_messages(conn, project_id, limit=limit, before_id=before_id) + for msg in messages: + if msg.get("message_type") == "task_created" and msg.get("task_id"): + task = models.get_task(conn, msg["task_id"]) + if task: + msg["task_stub"] = { + "id": task["id"], + "title": task["title"], + "status": task["status"], + } + conn.close() + return messages + + +@app.post("/api/projects/{project_id}/chat") +def send_chat_message(project_id: str, body: ChatMessageIn): + """Process a user message: classify intent, create task or answer, return both messages.""" + from core.chat_intent import classify_intent + + if not body.content.strip(): + raise HTTPException(400, "content must not be empty") + + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + + # 1. Save user message + user_msg = models.add_chat_message(conn, project_id, "user", body.content) + + # 2. Classify intent + intent = classify_intent(body.content) + + task = None + + if intent == "task_request": + # 3a. 
Create task (category OBS) and run pipeline in background + task_id = models.next_task_id(conn, project_id, category="OBS") + title = body.content[:120].strip() + t = models.create_task( + conn, task_id, project_id, title, + brief={"text": body.content, "source": "chat"}, + category="OBS", + ) + task = t + + import os as _os + env_vars = _os.environ.copy() + env_vars["KIN_NONINTERACTIVE"] = "1" + kin_root = Path(__file__).parent.parent + try: + subprocess.Popen( + [sys.executable, "-m", "cli.main", "--db", str(DB_PATH), + "run", task_id, "--allow-write"], + cwd=str(kin_root), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + stdin=subprocess.DEVNULL, + env=env_vars, + ) + except Exception as e: + _logger.warning("Failed to start pipeline for chat task %s: %s", task_id, e) + + assistant_content = f"Создал задачу {task_id}: {title}" + assistant_msg = models.add_chat_message( + conn, project_id, "assistant", assistant_content, + message_type="task_created", task_id=task_id, + ) + assistant_msg["task_stub"] = { + "id": t["id"], + "title": t["title"], + "status": t["status"], + } + + elif intent == "status_query": + # 3b. Return current task status summary + in_progress = models.list_tasks(conn, project_id=project_id, status="in_progress") + pending = models.list_tasks(conn, project_id=project_id, status="pending") + review = models.list_tasks(conn, project_id=project_id, status="review") + + parts = [] + if in_progress: + parts.append("В работе ({}):\n{}".format( + len(in_progress), + "\n".join(f" • {t['id']} — {t['title'][:60]}" for t in in_progress[:5]), + )) + if review: + parts.append("На ревью ({}):\n{}".format( + len(review), + "\n".join(f" • {t['id']} — {t['title'][:60]}" for t in review[:5]), + )) + if pending: + parts.append("Ожидает ({}):\n{}".format( + len(pending), + "\n".join(f" • {t['id']} — {t['title'][:60]}" for t in pending[:5]), + )) + + content = "\n\n".join(parts) if parts else "Нет активных задач." 
+ assistant_msg = models.add_chat_message(conn, project_id, "assistant", content) + + else: # question + assistant_msg = models.add_chat_message( + conn, project_id, "assistant", + "Я пока не умею отвечать на вопросы напрямую. " + "Если хотите — опишите задачу, я создам её и запущу агентов.", + ) + + conn.close() + return { + "user_message": user_msg, + "assistant_message": assistant_msg, + "task": task, + } + + # --------------------------------------------------------------------------- # SPA static files (AFTER all /api/ routes) # --------------------------------------------------------------------------- diff --git a/web/frontend/src/App.vue b/web/frontend/src/App.vue index 4ebce21..df4e357 100644 --- a/web/frontend/src/App.vue +++ b/web/frontend/src/App.vue @@ -1,4 +1,5 @@