Compare commits
3 commits
9d85f2f84b
...
89595aa077
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
89595aa077 | ||
|
|
e9285042ae | ||
|
|
dc47d313a9 |
3 changed files with 132 additions and 5 deletions
|
|
@@ -83,7 +83,7 @@ You receive:
|
|||
- `relevant_decisions` IDs are correct and relevant to the specialist's work
|
||||
- Department heads are used only for genuinely cross-domain complex tasks
|
||||
|
||||
## Output format
|
||||
## Return Format
|
||||
|
||||
Return ONLY valid JSON (no markdown, no explanation):
|
||||
|
||||
|
|
|
|||
127
tests/test_kin_docs_002_regression.py
Normal file
127
tests/test_kin_docs_002_regression.py
Normal file
|
|
@@ -0,0 +1,127 @@
|
|||
"""Regression tests for KIN-DOCS-002 — Standardise all agent prompts.
|
||||
|
||||
Acceptance criteria:
|
||||
1. pytest green (checked by running this suite)
|
||||
2. No file in agents/prompts/ contains the old '## Output format' section header
|
||||
3. Every prompt file contains '## Return Format'
|
||||
4. Every prompt file contains the full standard structure:
|
||||
## Working Mode, ## Focus On (or ## Focus), ## Quality Checks (or ## Quality), ## Return Format
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
PROMPTS_DIR = Path(__file__).parent.parent / "agents" / "prompts"
|
||||
|
||||
|
||||
def _prompt_files():
|
||||
return sorted(PROMPTS_DIR.glob("*.md"))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AC-2: No legacy '## Output format' section
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestNoLegacyOutputFormatSection:
    """Verifies the legacy '## Output format' section is absent from all prompts (AC-2)."""

    @staticmethod
    def _files_with_legacy_header():
        # Single shared scan: both tests below assert the same acceptance
        # criterion, just reported differently (file names vs. full paths),
        # so the duplicated read-and-filter logic lives here once.
        return [
            f
            for f in _prompt_files()
            if "## Output format" in f.read_text(encoding="utf-8")
        ]

    def test_no_prompt_contains_old_output_format_header(self):
        """No agents/prompts/*.md file contains '## Output format'."""
        files_with_old_header = [f.name for f in self._files_with_legacy_header()]
        assert files_with_old_header == [], (
            f"Файлы с устаревшей секцией '## Output format': {files_with_old_header}"
        )

    def test_grep_output_format_is_empty(self):
        """Equivalent of: grep -rl '## Output format' agents/prompts/ — empty output."""
        matches = [str(f) for f in self._files_with_legacy_header()]
        assert matches == [], (
            "grep -rl '## Output format' agents/prompts/ должен давать пустой вывод, "
            f"но нашёл: {matches}"
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AC-3: Every prompt contains '## Return Format'
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestAllPromptsContainReturnFormat:
    """Verifies the '## Return Format' section is present in every prompt (AC-3)."""

    def test_return_format_count_equals_prompt_count(self):
        """Number of prompts containing '## Return Format' equals the total prompt count."""
        all_files = _prompt_files()
        files_with_rf = [
            f for f in all_files if "## Return Format" in f.read_text(encoding="utf-8")
        ]
        assert len(files_with_rf) == len(all_files), (
            f"Промптов всего: {len(all_files)}, "
            f"с '## Return Format': {len(files_with_rf)}. "
            f"Без секции: {[f.name for f in all_files if f not in files_with_rf]}"
        )

    # Reuse _prompt_files() (already sorted) instead of re-implementing
    # sorted(PROMPTS_DIR.glob("*.md")) inline — keeps discovery logic in one place.
    @pytest.mark.parametrize("prompt_file", [f.name for f in _prompt_files()])
    def test_each_prompt_has_return_format(self, prompt_file):
        """Every prompt file contains the '## Return Format' section."""
        content = (PROMPTS_DIR / prompt_file).read_text(encoding="utf-8")
        assert "## Return Format" in content, (
            f"{prompt_file} не содержит секцию '## Return Format'"
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AC-4: Full standard structure in every prompt
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestAllPromptsContainStandardStructure:
    """Verifies every prompt carries the mandatory sections of the standard template (AC-4)."""

    # Sections every prompt must contain. Kept in one list so the
    # parametrized test below stays in sync with the acceptance criterion.
    REQUIRED_SECTIONS = [
        "## Working Mode",
        "## Return Format",
    ]

    # Reuse _prompt_files() (already sorted) instead of duplicating the glob inline.
    @pytest.mark.parametrize("prompt_file", [f.name for f in _prompt_files()])
    def test_each_prompt_has_working_mode(self, prompt_file):
        """Every prompt contains the '## Working Mode' section."""
        content = (PROMPTS_DIR / prompt_file).read_text(encoding="utf-8")
        assert "## Working Mode" in content, (
            f"{prompt_file} не содержит секцию '## Working Mode'"
        )

    @pytest.mark.parametrize("prompt_file", [f.name for f in _prompt_files()])
    def test_each_prompt_has_all_required_sections(self, prompt_file):
        """Every prompt contains each section listed in REQUIRED_SECTIONS.

        REQUIRED_SECTIONS was previously defined but never referenced; this
        test wires it in, so extending the list automatically extends coverage.
        """
        content = (PROMPTS_DIR / prompt_file).read_text(encoding="utf-8")
        missing = [s for s in self.REQUIRED_SECTIONS if s not in content]
        assert missing == [], f"{prompt_file} не содержит секции: {missing}"

    @pytest.mark.parametrize("prompt_file", [f.name for f in _prompt_files()])
    def test_return_format_comes_after_working_mode(self, prompt_file):
        """The '## Return Format' section appears after '## Working Mode'."""
        content = (PROMPTS_DIR / prompt_file).read_text(encoding="utf-8")
        wm_pos = content.find("## Working Mode")
        rf_pos = content.find("## Return Format")
        if wm_pos == -1 or rf_pos == -1:
            pytest.skip(f"{prompt_file}: одна из секций отсутствует (покрывается другим тестом)")
        assert rf_pos > wm_pos, (
            f"{prompt_file}: '## Return Format' (pos={rf_pos}) должна идти после "
            f"'## Working Mode' (pos={wm_pos})"
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sanity: prompt count stays at 25
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestPromptCount:
    """Sanity guard: the prompt inventory must not change unexpectedly."""

    def test_prompt_count_is_25(self):
        """agents/prompts/ holds exactly 25 .md files."""
        total = len(_prompt_files())
        assert total == 25, (
            f"Ожидалось 25 промптов, найдено {total}. "
            "Если добавлен новый промпт — обнови этот тест."
        )
|
||||
|
|
@@ -332,10 +332,10 @@ class TestPmPromptStatusFieldConsistency:
|
|||
content = f.read()
|
||||
|
||||
# Find the main output example block
|
||||
# It starts after '## Output format' and ends before '## Blocked Protocol'
|
||||
output_section_start = content.find("## Output format")
|
||||
# It starts after '## Return Format' and ends before '## Blocked Protocol'
|
||||
output_section_start = content.find("## Return Format")
|
||||
blocked_section_start = content.find("## Blocked Protocol")
|
||||
assert output_section_start != -1, "Section '## Output format' not found in pm.md"
|
||||
assert output_section_start != -1, "Section '## Return Format' not found in pm.md"
|
||||
assert blocked_section_start != -1, "Section '## Blocked Protocol' not found in pm.md"
|
||||
|
||||
output_section = content[output_section_start:blocked_section_start]
|
||||
|
|
@@ -343,7 +343,7 @@ class TestPmPromptStatusFieldConsistency:
|
|||
# The main example JSON block (the first ```json...``` block in this section)
|
||||
import re
|
||||
json_blocks = re.findall(r"```json\s*(.*?)```", output_section, re.DOTALL)
|
||||
assert len(json_blocks) >= 1, "No JSON example found in ## Output format section"
|
||||
assert len(json_blocks) >= 1, "No JSON example found in ## Return Format section"
|
||||
|
||||
main_example_json_text = json_blocks[0].strip()
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue