Protokoll #6499
| ID | 6499 |
|---|---|
| Timestamp | 2025-12-23 05:58:15.231975 |
| Client | root |
| IP | - |
| Model | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 2,537 (Input: 0, Output: 0) |
| Duration | 104 ms |
| Request time | - |
| Response time | - |
Request
{
  "event": "PreToolUse",
  "tool_name": "Write",
  "tool_input": {
    "file_path": "/opt/scripts/pipeline/protokoll.py"
  }
}

tool_input.content (/opt/scripts/pipeline/protokoll.py):

"""
KI-Protokoll Integration for Pipeline
Logs all LLM calls to ki_dev.protokoll for tracking and cost analysis.
"""

import time
from datetime import datetime

import mysql.connector

from config import DB_PROTOKOLL_CONFIG


class Protokoll:
    """Logger for LLM calls to ki_dev.protokoll."""

    def __init__(self, client_name="pipeline"):
        self.client_name = client_name
        self.conn = None

    def connect(self):
        """Connect to protokoll database."""
        if not self.conn or not self.conn.is_connected():
            self.conn = mysql.connector.connect(**DB_PROTOKOLL_CONFIG)

    def disconnect(self):
        """Disconnect from database."""
        if self.conn and self.conn.is_connected():
            self.conn.close()
            self.conn = None

    def log_llm_call(
        self,
        request: str,
        response: str = None,
        model_name: str = None,
        tokens_input: int = 0,
        tokens_output: int = 0,
        duration_ms: int = 0,
        status: str = "completed",
        error_message: str = None,
    ):
        """
        Log an LLM call to ki_dev.protokoll.

        Args:
            request: The prompt/request sent to the LLM
            response: The LLM response (truncated if too long)
            model_name: Name of the model used
            tokens_input: Input token count
            tokens_output: Output token count
            duration_ms: Duration in milliseconds
            status: 'pending', 'completed', or 'error'
            error_message: Error message if status is 'error'
        """
        self.connect()

        try:
            cursor = self.conn.cursor()
            now = datetime.now()

            # Truncate response if too long (TEXT limit ~65KB)
            if response and len(response) > 60000:
                response = response[:60000] + "...[truncated]"

            cursor.execute(
                """INSERT INTO protokoll
                       (request_ip, client_name, request, request_timestamp,
                        response, response_timestamp, duration_ms,
                        tokens_input, tokens_output, tokens_total,
                        model_name, status, error_message)
                   VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                (
                    "127.0.0.1",  # Pipeline runs locally
                    self.client_name,
                    request[:10000] if request else "",  # Truncate request too
                    now,
                    response,
                    now if response else None,
                    duration_ms,
                    tokens_input,
                    tokens_output,
                    tokens_input + tokens_output,
                    model_name,
                    status,
                    error_message,
                ),
            )
            self.conn.commit()
            cursor.close()
            return cursor.lastrowid

        except Exception as e:
            print(f"Protokoll logging failed: {e}")
            return None


class LLMCallContext:
    """Context manager for timing and logging LLM calls."""

    def __init__(self, protokoll: Protokoll, request: str, model_name: str):
        self.protokoll = protokoll
        self.request = request
        self.model_name = model_name
        self.start_time = None
        self.response = None
        self.tokens_input = 0
        self.tokens_output = 0
        self.error = None

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        duration_ms = int((time.time() - self.start_time) * 1000)

        if exc_type is not None:
            self.protokoll.log_llm_call(
                request=self.request,
                response=None,
                model_name=self.model_name,
                duration_ms=duration_ms,
                status="error",
                error_message=str(exc_val),
            )
        else:
            self.protokoll.log_llm_call(
                request=self.request,
                response=self.response,
                model_name=self.model_name,
                tokens_input=self.tokens_input,
                tokens_output=self.tokens_output,
                duration_ms=duration_ms,
                status="completed",
            )

        return False  # Don't suppress exceptions

    def set_response(self, response: str, tokens_input: int = 0, tokens_output: int = 0):
        """Set response data for logging."""
        self.response = response
        self.tokens_input = tokens_input
        self.tokens_output = tokens_output


# Global instance for pipeline
protokoll = Protokoll("pipeline")
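
The logged module defines LLMCallContext and a global protokoll instance but contains no call site. Below is a minimal usage sketch, assuming /opt/scripts/pipeline is on sys.path; `send` and its `.text`/`.input_tokens`/`.output_tokens` attributes stand in for the pipeline's actual LLM client interface and are not part of the logged code:

```python
from protokoll import protokoll, LLMCallContext


def logged_llm_call(prompt: str, model_name: str, send) -> str:
    """Run one LLM call and record it in ki_dev.protokoll via LLMCallContext.

    `send` is any callable that takes the prompt and returns an object exposing
    .text, .input_tokens and .output_tokens (an assumed client interface).
    """
    with LLMCallContext(protokoll, request=prompt, model_name=model_name) as ctx:
        result = send(prompt)
        # Hand response text and token counts to the context manager; its
        # __exit__ then writes a 'completed' row with the measured duration.
        # If send() raises, __exit__ writes status='error' with the exception
        # text and the exception still propagates, since __exit__ returns False.
        ctx.set_response(
            result.text,
            tokens_input=result.input_tokens,
            tokens_output=result.output_tokens,
        )
    return result.text
```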
Response
{
  "tool_response": {
    "type": "create",
    "filePath": "/opt/scripts/pipeline/protokoll.py",
    "structuredPatch": [],
    "originalFile": null
  }
}

tool_response.content is identical to the tool_input.content of the request above (the full source of /opt/scripts/pipeline/protokoll.py as written).
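
Since the module's docstring names cost analysis as a goal, here is a small reporting sketch against the same table. It reuses DB_PROTOKOLL_CONFIG from the logged imports, and the column names come from the INSERT statement above; the aggregation itself is only an illustration, not part of the logged code:

```python
import mysql.connector

from config import DB_PROTOKOLL_CONFIG


def tokens_per_model(days: int = 7):
    """Return (model_name, calls, total_tokens) rows for the last `days` days."""
    conn = mysql.connector.connect(**DB_PROTOKOLL_CONFIG)
    try:
        cursor = conn.cursor()
        cursor.execute(
            """SELECT model_name, COUNT(*) AS calls, SUM(tokens_total) AS total_tokens
                 FROM protokoll
                WHERE status = 'completed'
                  AND request_timestamp >= NOW() - INTERVAL %s DAY
                GROUP BY model_name
                ORDER BY total_tokens DESC""",
            (days,),
        )
        return cursor.fetchall()
    finally:
        conn.close()
```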