Log entry #12535

ID: 12535
Timestamp: 2025-12-24 09:35:18.078133
Client: root
IP: 145.224.96.190
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 1,336 (Input: 35, Output: 1,301)
Duration: 94 ms
Request time: 2025-12-24 09:35:18.078133
Response time: 2025-12-24 09:35:18.172056

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/generate.py",
        "offset": 570,
        "limit": 150
    }
}
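
The request above is a PreToolUse hook event for the Read tool: the payload carries the tool name and its input (file_path, offset, limit). Below is a minimal sketch of a handler that consumes an event shaped like this, assuming the JSON arrives on stdin and that exiting with status 0 lets the tool call proceed; both assumptions are about the hook runner and are not shown in this log.

#!/usr/bin/env python3
# Minimal sketch of a PreToolUse handler for events shaped like the Request above.
# Assumptions (not shown in this log): the event JSON arrives on stdin, and
# exiting with status 0 lets the tool call proceed.
import json
import sys

def main():
    event = json.load(sys.stdin)

    if event.get("event") != "PreToolUse":
        sys.exit(0)  # not a pre-tool event; nothing to do

    tool = event.get("tool_name")
    tool_input = event.get("tool_input", {})

    # Example policy: record Read access to the pipeline scripts on stderr.
    if tool == "Read" and str(tool_input.get("file_path", "")).startswith("/var/www/scripts/"):
        sys.stderr.write(
            f"Read {tool_input['file_path']} "
            f"(offset={tool_input.get('offset')}, limit={tool_input.get('limit')})\n"
        )

    sys.exit(0)  # allow the call

if __name__ == "__main__":
    main()

Under these assumptions the handler is a pass-through that only records Read calls; actually blocking a call would require knowing the runner's rejection convention, which this log does not show.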

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/var\/www\/scripts\/pipeline\/generate.py",
            "content": "def generate_content(order_id, model=\"anthropic\", collection=\"documents\", context_limit=5):\n    \"\"\"\n    Main content generation function.\n\n    Args:\n        order_id: Content order ID\n        model: 'anthropic' or 'ollama'\n        collection: Qdrant collection to search\n        context_limit: Number of context chunks\n\n    Returns:\n        dict with version_id, content, sources\n    \"\"\"\n    db.connect()\n\n    try:\n        # Load order\n        order = get_order(order_id)\n        if not order:\n            return {\"error\": f\"Order {order_id} not found\"}\n\n        # Update status\n        update_order_status(order_id, \"generating\")\n\n        # Get RAG context\n        context = get_rag_context(order[\"briefing\"], collection, context_limit)\n\n        # Build profile\/contract\/structure\n        profile = None\n        if order.get(\"profile_config\"):\n            config = (\n                json.loads(order[\"profile_config\"])\n                if isinstance(order[\"profile_config\"], str)\n                else order[\"profile_config\"]\n            )\n            profile = {\"name\": order[\"profile_name\"], \"config\": config}\n\n        contract = None\n        if order.get(\"contract_config\"):\n            config = (\n                json.loads(order[\"contract_config\"])\n                if isinstance(order[\"contract_config\"], str)\n                else order[\"contract_config\"]\n            )\n            contract = {\"name\": order[\"contract_name\"], \"config\": config}\n\n        structure = None\n        output_format = \"markdown\"  # Default\n        if order.get(\"structure_config\"):\n            config = (\n                json.loads(order[\"structure_config\"])\n                if isinstance(order[\"structure_config\"], str)\n                else order[\"structure_config\"]\n            )\n            structure = {\"name\": order[\"structure_name\"], \"config\": config}\n            # Determine output format from structure\n            ausgabe = config.get(\"ausgabe\", {})\n            output_format = ausgabe.get(\"format\", \"markdown\")\n\n        # Build prompt\n        prompt = build_generation_prompt(order[\"briefing\"], context, profile, contract, structure)\n\n        # Generate content\n        content = call_llm(prompt, model, client_name=\"content-studio-generate\")\n\n        # Get current version number\n        cursor = db.execute(\n            \"SELECT MAX(version_number) as max_v FROM content_versions WHERE order_id = %s\", (order_id,)\n        )\n        result = cursor.fetchone()\n        cursor.close()\n        version_number = (result[\"max_v\"] or 0) + 1\n\n        # Save version with correct format\n        version_id = save_version(order_id, content, version_number, output_format)\n\n        # Save sources\n        save_sources(order_id, context)\n\n        # Update status\n        update_order_status(order_id, \"critique\")\n\n        return {\n            \"success\": True,\n            \"order_id\": order_id,\n            \"version_id\": version_id,\n            \"version_number\": version_number,\n            \"content\": content,\n            \"sources\": [{\"source\": c[\"source\"], \"score\": c[\"score\"]} for c in context],\n        }\n\n    except Exception as e:\n        update_order_status(order_id, \"draft\")\n        return {\"error\": str(e)}\n    finally:\n        db.disconnect()\n\n\ndef get_critic(critic_id):\n    \"\"\"Load critic from database.\"\"\"\n    cursor = 
db.execute(\n        \"\"\"SELECT c.*, p.content as prompt_content\n           FROM critics c\n           LEFT JOIN prompts p ON c.prompt_id = p.id\n           WHERE c.id = %s AND c.is_active = 1\"\"\",\n        (critic_id,),\n    )\n    result = cursor.fetchone()\n    cursor.close()\n    return result\n\n\ndef run_critic(content, critic_id, model=\"anthropic\"):\n    \"\"\"\n    Run a single critic on content.\n\n    Returns:\n        dict with feedback and rating\n    \"\"\"\n    db.connect()\n\n    try:\n        critic = get_critic(critic_id)\n        if not critic:\n            return {\"error\": f\"Critic {critic_id} not found\"}\n\n        fokus = json.loads(critic[\"fokus\"]) if isinstance(critic[\"fokus\"], str) else critic[\"fokus\"]\n        fokus_str = \", \".join(fokus)\n\n        # Load prompt from database (via critic.prompt_id or fallback to generic)\n        prompt_template = critic.get(\"prompt_content\")\n        if not prompt_template:\n            prompt_template = get_prompt(\"critic-generic\")\n        if not prompt_template:\n            # Ultimate fallback - should never happen if DB is properly set up\n            prompt_template = \"\"\"Du bist ein kritischer Lektor mit dem Fokus auf: {fokus}\n\nAnalysiere den folgenden Text und gib strukturiertes Feedback:\n\n## Text:\n{content}\n\n## Deine Aufgabe:\n1. Prüfe den Text auf die Aspekte: {fokus}\n2. Identifiziere konkrete Verbesserungspunkte\n3. Bewerte die Qualität (1-10)\n\nAntworte im JSON-Format:\n{{\n  \"rating\": 8,",
            "numLines": 150,
            "startLine": 570,
            "totalLines": 987
        }
    }
}
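
The file chunk returned above defines generate_content(order_id, model, collection, context_limit), which loads the order, pulls RAG context from the Qdrant collection, builds the prompt, calls the LLM, saves a new content version, and returns a dict with version_id, content, and sources (or an "error" key on failure). A hypothetical call site is sketched below; the import path and the order_id value are assumptions for illustration, and only the signature and return shape come from the logged code.

# Hypothetical call site for generate_content() from the logged file chunk.
# The import path and the order_id value are assumptions for illustration;
# the function signature and return shape are taken from the log above.
from pipeline.generate import generate_content  # assumed module path

result = generate_content(order_id=42, model="anthropic",
                          collection="documents", context_limit=5)

if "error" in result:
    print(f"Generation failed: {result['error']}")
else:
    print(f"Saved version {result['version_number']} (id={result['version_id']})")
    for src in result["sources"]:
        print(f"  {src['source']} (score: {src['score']})")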