Log Entry #24286

ID: 24286
Timestamp: 2025-12-27 16:06:43.470901
Client: root
IP: 145.224.72.140
Model: claude-sonnet-4-20250514
Status: pending
Tokens: 773 (Input: 773, Output: 0)
Duration: -
Request time: 2025-12-27 16:06:43.470901
Response time: -

Request

{
    "event": "PostToolUse",
    "tool_name": "Read",
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/var\/www\/scripts\/pipeline\/generators\/content_generator.py",
            "content": "\"\"\"\nContent Generator - Core content generation with LLM calls.\n\"\"\"\n\nimport json\nimport sys\nimport time\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_API_KEY, ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST\nfrom db import db\n\nfrom .config_loader import get_order, get_prompt, parse_author_profile, parse_structure\nfrom .persistence import save_sources, save_version, update_order_status\nfrom .rag_context import get_rag_context, get_semantic_context, get_taxonomy_context\n\n\ndef build_generation_prompt(\n    briefing: str,\n    context: list[dict],\n    profile: dict | None,\n    contract: dict | None,\n    structure: dict | None = None,\n    semantic: dict | None = None,\n    taxonomy: list | None = None,\n) -> str:\n    \"\"\"Build the content generation prompt.\"\"\"\n\n    # Format context\n    context_text = \"\"\n    for i, ctx in enumerate(context, 1):\n        context_text += f\"\\n[Quelle {i}: {ctx['source']}]\\n{ctx['content']}\\n\"\n\n    # Build semantic context (entities and relations)\n    semantic_text = \"\"\n    if semantic:\n        if semantic.get(\"entities\"):\n            semantic_text += \"\\n## Relevante Konzepte\\n\"\n            for e in semantic[\"entities\"][:10]:\n                desc = e.get(\"description\") or \"\"\n                if desc:\n                    semantic_text += f\"- **{e['name']}** ({e['type']}): {desc[:100]}\\n\"\n                else:\n                    semantic_text += f\"- **{e['name']}** ({e['type']})\\n\"\n\n        if semantic.get(\"relations\"):\n            semantic_text += \"\\n## Konzept-Beziehungen\\n\"\n            for r in semantic[\"relations\"][:10]:\n                semantic_text += f\"- {r['source']} → {r['relation_type']} → {r['target']}\\n\"\n\n    # Build taxonomy context\n    taxonomy_text = \"\"\n    if taxonomy:\n        taxonomy_text = \"\\n## Thematische Einordnung\\n\"\n        taxonomy_text += \", \".join([t[\"name\"] for t in taxonomy])\n\n    # Build profile instructions - detect new vs old format\n    profile_text = \"\"\n    if profile:\n        config = profile.get(\"config\", {})\n\n        # Detect new format (has \"haltung\" or \"tonalitaet\" at top level)\n        if \"haltung\" in config or \"tonalitaet\" in config or \"grammatik_und_satzbau\" in config:\n            # New Cary-style profile\n            profile_text = f\"\"\"\n## Autorenprofil: {profile.get(\"name\", \"Standard\")}\n\n{parse_author_profile(config)}\n\"\"\"\n        else:\n            # Old format - keep backwards compatibility\n            autorenprofil = config.get(\"autorenprofil\", config)\n\n            stimme = autorenprofil.get(\"stimme\", {})\n            stimme_text = \"\"\n            if stimme:\n                stimme_text = f\"\"\"\n### Stimme\/Tonalität:\n- Ton: {stimme.get(\"ton\", \"neutral\")}",
            "numLines": 80,
            "startLine": 1,
            "totalLines": 400
        }
    }
}
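
Note: the request body above has the shape of a PostToolUse hook event for the Read tool; the relevant fields are tool_name and tool_response.file (filePath, numLines, startLine, totalLines). Below is a minimal sketch of extracting those fields from such a logged payload. The helper name summarize_read_event and the file name request.json are illustrative assumptions, not part of the logging system.

import json

def summarize_read_event(payload: dict) -> str:
    """Summarize a logged PostToolUse event for the Read tool.

    Field names mirror the request body shown above; other events
    or tools may carry a differently shaped tool_response.
    """
    info = payload.get("tool_response", {}).get("file", {})
    path = info.get("filePath", "?")
    start = info.get("startLine", 1)
    shown = info.get("numLines", 0)
    total = info.get("totalLines", 0)
    return (
        f"{payload.get('tool_name', '?')} tool read {path}: "
        f"lines {start}-{start + shown - 1} of {total}"
    )

# Hypothetical usage: the request body above saved as request.json
with open("request.json") as fh:
    event = json.load(fh)
print(summarize_read_event(event))
# -> Read tool read /var/www/scripts/pipeline/generators/content_generator.py: lines 1-80 of 400
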

Response

-