Protokoll #11693
| Field | Value |
|---|---|
| ID | 11693 |
| Timestamp | 2025-12-23 20:58:20.915361 |
| Client | root |
| IP | - |
| Model | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 588 (Input: 0, Output: 0) |
| Duration | 74 ms |
| Request time | - |
| Response time | - |
Request
{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/opt\/scripts\/pipeline\/generate.py",
        "offset": 460,
        "limit": 60
    }
}
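The request is a PreToolUse hook event for the Read tool: it asks for a 60-line window of /opt/scripts/pipeline/generate.py starting at line 460, which matches the startLine/numLines metadata in the response below. As a rough illustration of the offset/limit semantics (assuming a 1-based offset, consistent with the reported startLine; the helper below is a hypothetical sketch, not the actual Read tool):

```python
from pathlib import Path

def read_window(file_path: str, offset: int, limit: int) -> list[str]:
    """Illustrative only: return `limit` lines starting at 1-based line `offset`."""
    lines = Path(file_path).read_text(encoding="utf-8").splitlines()
    return lines[offset - 1 : offset - 1 + limit]

# read_window("/opt/scripts/pipeline/generate.py", 460, 60)
# would cover lines 460-519, i.e. the excerpt shown in the response below.
```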
Response
{
"tool_response": {
"type": "text",
"file": {
"filePath": "\/opt\/scripts\/pipeline\/generate.py",
"content": "\n try:\n if model == \"anthropic\" and ANTHROPIC_API_KEY:\n import anthropic\n\n client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n model_name = ANTHROPIC_MODEL\n\n message = client.messages.create(\n model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{\"role\": \"user\", \"content\": prompt}]\n )\n response_text = message.content[0].text\n\n # Extract token usage from Anthropic response\n if hasattr(message, \"usage\"):\n tokens_input = getattr(message.usage, \"input_tokens\", 0)\n tokens_output = getattr(message.usage, \"output_tokens\", 0)\n else:\n # Fallback to Ollama\n import requests\n\n model_name = OLLAMA_CHAT_MODEL\n\n response = requests.post(\n f\"{OLLAMA_HOST}\/api\/generate\",\n json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False},\n timeout=300,\n )\n response.raise_for_status()\n result = response.json()\n response_text = result.get(\"response\", \"\")\n\n # Extract token counts from Ollama response\n tokens_input = result.get(\"prompt_eval_count\", 0)\n tokens_output = result.get(\"eval_count\", 0)\n\n except Exception as e:\n status = \"error\"\n error_message = str(e)\n raise\n\n finally:\n # Calculate duration\n duration_ms = int((time.time() - start_time) * 1000)\n\n # Log to protokoll\n db.log_to_protokoll(\n client_name=client_name,\n request=prompt,\n response=response_text if status == \"completed\" else None,\n model_name=model_name,\n tokens_input=tokens_input,\n tokens_output=tokens_output,\n duration_ms=duration_ms,\n status=status,\n error_message=error_message,\n )\n\n return response_text\n",
"numLines": 60,
"startLine": 460,
"totalLines": 978
}
}
}
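For readability, the escaped `content` string in the response decodes to the Python excerpt below (lines 460-519 of generate.py). The logged string has its indentation collapsed, so the nesting shown here is a reconstruction; in particular, pairing the `else:` ("Fallback to Ollama") with the `if model == "anthropic" and ANTHROPIC_API_KEY:` branch is inferred from the comments, not copied verbatim from the file.

```python
try:
    if model == "anthropic" and ANTHROPIC_API_KEY:
        import anthropic

        client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
        model_name = ANTHROPIC_MODEL

        message = client.messages.create(
            model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{"role": "user", "content": prompt}]
        )
        response_text = message.content[0].text

        # Extract token usage from Anthropic response
        if hasattr(message, "usage"):
            tokens_input = getattr(message.usage, "input_tokens", 0)
            tokens_output = getattr(message.usage, "output_tokens", 0)
    else:
        # Fallback to Ollama
        import requests

        model_name = OLLAMA_CHAT_MODEL

        response = requests.post(
            f"{OLLAMA_HOST}/api/generate",
            json={"model": OLLAMA_CHAT_MODEL, "prompt": prompt, "stream": False},
            timeout=300,
        )
        response.raise_for_status()
        result = response.json()
        response_text = result.get("response", "")

        # Extract token counts from Ollama response
        tokens_input = result.get("prompt_eval_count", 0)
        tokens_output = result.get("eval_count", 0)

except Exception as e:
    status = "error"
    error_message = str(e)
    raise

finally:
    # Calculate duration
    duration_ms = int((time.time() - start_time) * 1000)

    # Log to protokoll
    db.log_to_protokoll(
        client_name=client_name,
        request=prompt,
        response=response_text if status == "completed" else None,
        model_name=model_name,
        tokens_input=tokens_input,
        tokens_output=tokens_output,
        duration_ms=duration_ms,
        status=status,
        error_message=error_message,
    )

    return response_text
```

The `finally` block is presumably what writes rows like this one via `db.log_to_protokoll`, recording client, prompt, response, token counts, duration and status for every call, whether it succeeds or fails.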