Protokoll #2745
| ID | 2745 |
|---|---|
| Zeitstempel | 2025-12-22 01:25:10.393490 |
| Client | root |
| IP | - |
| Modell | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 855 (Input: 0, Output: 0) |
| Dauer | 82 ms |
| Request-Zeit | - |
| Response-Zeit | - |
| Fehler | - |
Request
{
"event": "PreToolUse",
"tool_name": "Read",
"tool_input": {
"file_path": "\/opt\/scripts\/pipeline\/generate.py",
"offset": 200,
"limit": 100
}
}
Response
{
"tool_response": {
"type": "text",
"file": {
"filePath": "\/opt\/scripts\/pipeline\/generate.py",
"content": "{context_text}\n\n## Briefing:\n{briefing}\n\n## Anweisungen:\n1. Nutze die Informationen aus dem Kontext als Grundlage\n2. Halte dich an das Autorenprofil und den Schreibstil\n3. Beachte die Vorgaben aus dem Contract\n4. Strukturiere den Text gemäß dem Template (falls angegeben)\n5. Schreibe auf Deutsch\n6. Kennzeichne verwendete Quellen\n\nErstelle nun den Content:\"\"\"\n\n return prompt\n\n\ndef call_llm(prompt, model=\"anthropic\"):\n \"\"\"Call LLM to generate content.\"\"\"\n if model == \"anthropic\" and ANTHROPIC_API_KEY:\n import anthropic\n\n client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n\n message = client.messages.create(\n model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{\"role\": \"user\", \"content\": prompt}]\n )\n return message.content[0].text\n else:\n # Fallback to Ollama\n import requests\n\n response = requests.post(\n f\"{OLLAMA_HOST}\/api\/generate\",\n json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False},\n timeout=300,\n )\n response.raise_for_status()\n return response.json().get(\"response\", \"\")\n\n\ndef save_version(order_id, content, version_number=1):\n \"\"\"Save content version to database.\"\"\"\n content_json = json.dumps({\"text\": content, \"format\": \"markdown\"})\n\n cursor = db.execute(\n \"\"\"INSERT INTO content_versions (order_id, version_number, content)\n VALUES (%s, %s, %s)\"\"\",\n (order_id, version_number, content_json),\n )\n db.commit()\n version_id = cursor.lastrowid\n cursor.close()\n return version_id\n\n\ndef save_sources(order_id, context):\n \"\"\"Save RAG sources to content_sources.\"\"\"\n for ctx in context:\n # Try to find chunk_id by content match\n cursor = db.execute(\"SELECT id FROM chunks WHERE content LIKE %s LIMIT 1\", (ctx[\"content\"][:100] + \"%\",))\n chunk = cursor.fetchone()\n cursor.close()\n\n if chunk:\n cursor = db.execute(\n \"\"\"INSERT IGNORE INTO content_sources (order_id, chunk_id, relevance_score)\n VALUES (%s, %s, %s)\"\"\",\n 
(order_id, chunk[\"id\"], ctx[\"score\"]),\n )\n db.commit()\n cursor.close()\n\n\ndef update_order_status(order_id, status):\n \"\"\"Update order status.\"\"\"\n cursor = db.execute(\"UPDATE content_orders SET status = %s, updated_at = NOW() WHERE id = %s\", (status, order_id))\n db.commit()\n cursor.close()\n\n\ndef generate_content(order_id, model=\"anthropic\", collection=\"documents\", context_limit=5):\n \"\"\"\n Main content generation function.\n\n Args:\n order_id: Content order ID\n model: 'anthropic' or 'ollama'\n collection: Qdrant collection to search\n context_limit: Number of context chunks\n\n Returns:\n dict with version_id, content, sources\n \"\"\"\n db.connect()\n\n try:\n # Load order\n order = get_order(order_id)",
"numLines": 100,
"startLine": 200,
"totalLines": 641
}
}
}