Protokoll #13372

ID: 13372
Zeitstempel: 2025-12-25 00:24:26.224215
Client: root
IP: 145.224.72.140
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 807 (Input: 35, Output: 772)
Dauer: 82 ms
Request-Zeit: 2025-12-25 00:24:26.224215
Response-Zeit: 2025-12-25 00:24:26.305791

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/generate.py",
        "offset": 680,
        "limit": 80
    }
}

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/var\/www\/scripts\/pipeline\/generate.py",
            "content": "            cursor = db.execute(\n                \"\"\"INSERT IGNORE INTO content_sources (order_id, chunk_id, relevance_score)\n                   VALUES (%s, %s, %s)\"\"\",\n                (order_id, chunk[\"id\"], ctx[\"score\"]),\n            )\n            db.commit()\n            cursor.close()\n\n\ndef update_order_status(order_id, status):\n    \"\"\"Update order status.\"\"\"\n    cursor = db.execute(\"UPDATE content_orders SET status = %s, updated_at = NOW() WHERE id = %s\", (status, order_id))\n    db.commit()\n    cursor.close()\n\n\ndef generate_content(order_id, model=\"anthropic\", collection=\"documents\", context_limit=5):\n    \"\"\"\n    Main content generation function.\n\n    Args:\n        order_id: Content order ID\n        model: 'anthropic' or 'ollama'\n        collection: Qdrant collection to search\n        context_limit: Number of context chunks\n\n    Returns:\n        dict with version_id, content, sources\n    \"\"\"\n    db.connect()\n\n    try:\n        # Load order\n        order = get_order(order_id)\n        if not order:\n            return {\"error\": f\"Order {order_id} not found\"}\n\n        # Update status\n        update_order_status(order_id, \"generating\")\n\n        # Get RAG context\n        context = get_rag_context(order[\"briefing\"], collection, context_limit)\n\n        # Build profile\/contract\/structure\n        profile = None\n        if order.get(\"profile_config\"):\n            config = (\n                json.loads(order[\"profile_config\"])\n                if isinstance(order[\"profile_config\"], str)\n                else order[\"profile_config\"]\n            )\n            profile = {\"name\": order[\"profile_name\"], \"config\": config}\n\n        contract = None\n        if order.get(\"contract_config\"):\n            config = (\n                json.loads(order[\"contract_config\"])\n                if isinstance(order[\"contract_config\"], str)\n                else 
order[\"contract_config\"]\n            )\n            contract = {\"name\": order[\"contract_name\"], \"config\": config}\n\n        structure = None\n        output_format = \"markdown\"  # Default\n        if order.get(\"structure_config\"):\n            config = (\n                json.loads(order[\"structure_config\"])\n                if isinstance(order[\"structure_config\"], str)\n                else order[\"structure_config\"]\n            )\n            structure = {\"name\": order[\"structure_name\"], \"config\": config}\n            # Determine output format from structure\n            ausgabe = config.get(\"ausgabe\", {})\n            output_format = ausgabe.get(\"format\", \"markdown\")\n\n        # Build prompt\n        prompt = build_generation_prompt(order[\"briefing\"], context, profile, contract, structure)\n\n        # Generate content\n        content = call_llm(prompt, model, client_name=\"content-studio-generate\")",
            "numLines": 80,
            "startLine": 680,
            "totalLines": 1113
        }
    }
}
← Vorheriger Zur Liste Nächster →