Protokoll #11617
| ID | 11617 |
|---|---|
| Timestamp | 2025-12-23 20:26:44.731810 |
| Client | root |
| IP | - |
| Model | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 1,346 (Input: 0, Output: 0) |
| Duration | 89 ms |
| Request time | - |
| Response time | - |
Request
{
"event": "PreToolUse",
"tool_name": "Read",
"tool_input": {
"file_path": "\/opt\/scripts\/pipeline\/generate.py",
"offset": 500,
"limit": 150
}
}
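The request payload is a PreToolUse hook event for the Read tool, asking for 150 lines of /opt/scripts/pipeline/generate.py starting at offset 500. As an illustration only, the sketch below shows one way a hook script could consume such a payload: it reads the event JSON from stdin and decides whether the Read call may proceed. The stdin transport, the `ALLOWED_PREFIX` policy, and the convention that a non-zero exit blocks the call are assumptions for this sketch, not documented behaviour of this system.

```python
#!/usr/bin/env python3
"""Hypothetical PreToolUse hook: block Read calls outside an allowed directory.

Assumptions (not confirmed by the log above): the hook receives the event
JSON on stdin and signals "block" by exiting with a non-zero status.
"""
import json
import sys

ALLOWED_PREFIX = "/opt/scripts/"  # assumption: only pipeline scripts may be read


def main() -> int:
    event = json.load(sys.stdin)

    # Only inspect Read calls; let every other tool and event through.
    if event.get("event") != "PreToolUse" or event.get("tool_name") != "Read":
        return 0

    file_path = event.get("tool_input", {}).get("file_path", "")
    if not file_path.startswith(ALLOWED_PREFIX):
        print(f"Read outside {ALLOWED_PREFIX} blocked: {file_path}", file=sys.stderr)
        return 2  # assumption: non-zero exit blocks the tool call

    return 0


if __name__ == "__main__":
    sys.exit(main())
```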
Response
{
"tool_response": {
"type": "text",
"file": {
"filePath": "\/opt\/scripts\/pipeline\/generate.py",
"content": "\n finally:\n # Calculate duration\n duration_ms = int((time.time() - start_time) * 1000)\n\n # Log to protokoll\n db.log_to_protokoll(\n client_name=client_name,\n request=prompt,\n response=response_text if status == \"completed\" else None,\n model_name=model_name,\n tokens_input=tokens_input,\n tokens_output=tokens_output,\n duration_ms=duration_ms,\n status=status,\n error_message=error_message,\n )\n\n return response_text\n\n\ndef save_version(order_id, content, version_number=1):\n \"\"\"Save content version to database.\"\"\"\n content_json = json.dumps({\"text\": content, \"format\": \"markdown\"})\n\n cursor = db.execute(\n \"\"\"INSERT INTO content_versions (order_id, version_number, content)\n VALUES (%s, %s, %s)\"\"\",\n (order_id, version_number, content_json),\n )\n db.commit()\n version_id = cursor.lastrowid\n cursor.close()\n return version_id\n\n\ndef save_sources(order_id, context):\n \"\"\"Save RAG sources to content_sources.\"\"\"\n for ctx in context:\n # Try to find chunk_id by content match\n cursor = db.execute(\"SELECT id FROM chunks WHERE content LIKE %s LIMIT 1\", (ctx[\"content\"][:100] + \"%\",))\n chunk = cursor.fetchone()\n cursor.close()\n\n if chunk:\n cursor = db.execute(\n \"\"\"INSERT IGNORE INTO content_sources (order_id, chunk_id, relevance_score)\n VALUES (%s, %s, %s)\"\"\",\n (order_id, chunk[\"id\"], ctx[\"score\"]),\n )\n db.commit()\n cursor.close()\n\n\ndef update_order_status(order_id, status):\n \"\"\"Update order status.\"\"\"\n cursor = db.execute(\"UPDATE content_orders SET status = %s, updated_at = NOW() WHERE id = %s\", (status, order_id))\n db.commit()\n cursor.close()\n\n\ndef generate_content(order_id, model=\"anthropic\", collection=\"documents\", context_limit=5):\n \"\"\"\n Main content generation function.\n\n Args:\n order_id: Content order ID\n model: 'anthropic' or 'ollama'\n collection: Qdrant collection to search\n context_limit: Number of context chunks\n\n Returns:\n dict with version_id, content, sources\n \"\"\"\n db.connect()\n\n try:\n # Load order\n order = get_order(order_id)\n if not order:\n return {\"error\": f\"Order {order_id} not found\"}\n\n # Update status\n update_order_status(order_id, \"generating\")\n\n # Get RAG context\n context = get_rag_context(order[\"briefing\"], collection, context_limit)\n\n # Build profile\/contract\/structure\n profile = None\n if order.get(\"profile_config\"):\n config = (\n json.loads(order[\"profile_config\"])\n if isinstance(order[\"profile_config\"], str)\n else order[\"profile_config\"]\n )\n profile = {\"name\": order[\"profile_name\"], \"config\": config}\n\n contract = None\n if order.get(\"contract_config\"):\n config = (\n json.loads(order[\"contract_config\"])\n if isinstance(order[\"contract_config\"], str)\n else order[\"contract_config\"]\n )\n contract = {\"name\": order[\"contract_name\"], \"config\": config}\n\n structure = None\n if order.get(\"structure_config\"):\n config = (\n json.loads(order[\"structure_config\"])\n if isinstance(order[\"structure_config\"], str)\n else order[\"structure_config\"]\n )\n structure = {\"name\": order[\"structure_name\"], \"config\": config}\n\n # Build prompt\n prompt = build_generation_prompt(order[\"briefing\"], context, profile, contract, structure)\n\n # Generate content\n content = call_llm(prompt, model, client_name=\"content-studio-generate\")\n\n # Get current version number\n cursor = db.execute(\n \"SELECT MAX(version_number) as max_v FROM content_versions WHERE order_id = %s\", (order_id,)\n )\n result 
= cursor.fetchone()\n cursor.close()\n version_number = (result[\"max_v\"] or 0) + 1\n\n # Save version\n version_id = save_version(order_id, content, version_number)\n\n # Save sources\n save_sources(order_id, context)\n\n # Update status\n update_order_status(order_id, \"critique\")\n\n return {\n \"success\": True,\n \"order_id\": order_id,\n \"version_id\": version_id,\n \"version_number\": version_number,\n \"content\": content,\n \"sources\": [{\"source\": c[\"source\"], \"score\": c[\"score\"]} for c in context],\n }\n\n except Exception as e:\n update_order_status(order_id, \"draft\")",
"numLines": 150,
"startLine": 500,
"totalLines": 968
}
}
}
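The returned file excerpt covers the tail of the generation pipeline: the `finally` block that writes one row per LLM call via `db.log_to_protokoll` (which is what produces records like the one shown above), plus `save_version`, `save_sources`, `update_order_status`, and the `generate_content` orchestration. For context, below is a minimal free-function sketch of what `log_to_protokoll` might do; the `protokoll` table name, the column names, and the `NOW()` timestamp are assumptions inferred from the call site in the excerpt and the fields rendered in the table above, not taken from this file.

```python
def log_to_protokoll(conn, client_name, request, response, model_name,
                     tokens_input, tokens_output, duration_ms, status,
                     error_message=None):
    """Hypothetical variant of db.log_to_protokoll from the excerpt.

    `conn` is any DB-API connection using %s placeholders (e.g. PyMySQL);
    table and column names are assumptions inferred from the record above.
    """
    cursor = conn.cursor()
    cursor.execute(
        """INSERT INTO protokoll
               (client_name, request, response, model_name,
                tokens_input, tokens_output, duration_ms, status,
                error_message, request_timestamp)
           VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())""",
        (client_name, request, response, model_name,
         tokens_input, tokens_output, duration_ms, status, error_message),
    )
    conn.commit()
    protokoll_id = cursor.lastrowid  # ID of the new log row, e.g. 11617 above
    cursor.close()
    return protokoll_id
```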