Log Entry #15229

ID: 15229
Timestamp: 2025-12-25 15:46:59.970911
Client: root
IP: 145.224.72.140
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 1,985 (Input: 990, Output: 995)
Duration: 96 ms
Request time: 2025-12-25 15:46:59.970911
Response time: 2025-12-25 15:47:00.066420

Request

{
    "event": "PreToolUse",
    "tool_name": "Write",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/knowledge\/llm_service.py",
        "content": "\"\"\"LLM-Service für Wissensextraktion.\"\"\"\nimport json\nimport re\nimport time\n\nimport requests\n\nimport sys\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_API_KEY, OLLAMA_HOST\nfrom db import db\n\nfrom .models import ModelConfig, DEFAULT_MODELS\n\n\nclass LLMService:\n    \"\"\"Service für LLM-Aufrufe.\"\"\"\n\n    def __init__(self, model_config: ModelConfig | None = None):\n        \"\"\"Initialisiere Service mit Modellkonfiguration.\"\"\"\n        self.model = model_config or DEFAULT_MODELS[\"ollama\"]\n        self.anthropic_client = None\n\n        if self.model.provider == \"anthropic\":\n            self._init_anthropic()\n\n    def _init_anthropic(self):\n        \"\"\"Initialisiere Anthropic Client.\"\"\"\n        try:\n            import anthropic\n\n            if ANTHROPIC_API_KEY:\n                self.anthropic_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n        except ImportError:\n            db.log(\"WARNING\", \"Anthropic SDK nicht installiert, fallback zu Ollama\")\n            self.model = DEFAULT_MODELS[\"ollama\"]\n\n    def call_llm(self, prompt: str, json_output: bool = True) -> str:\n        \"\"\"Rufe LLM auf und gib Antwort zurück.\"\"\"\n        start_time = time.time()\n\n        try:\n            if self.model.provider == \"anthropic\" and self.anthropic_client:\n                response = self.anthropic_client.messages.create(\n                    model=self.model.model_name,\n                    max_tokens=self.model.max_tokens,\n                    temperature=self.model.temperature,\n                    messages=[{\"role\": \"user\", \"content\": prompt}],\n                )\n                result = response.content[0].text\n                tokens_in = response.usage.input_tokens\n                tokens_out = response.usage.output_tokens\n            else:\n                # Ollama\n                payload = {\n                    \"model\": self.model.model_name,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"options\": {\"temperature\": self.model.temperature},\n                }\n                if json_output:\n                    payload[\"format\"] = \"json\"\n\n                resp = requests.post(f\"{OLLAMA_HOST}\/api\/generate\", json=payload, timeout=600)\n                resp.raise_for_status()\n                data = resp.json()\n                result = data.get(\"response\", \"\")\n                tokens_in = data.get(\"prompt_eval_count\", 0)\n                tokens_out = data.get(\"eval_count\", 0)\n\n            duration_ms = int((time.time() - start_time) * 1000)\n\n            # Protokolliere LLM-Aufruf\n            db.log_to_protokoll(\n                client_name=\"pipeline-knowledge\",\n                request=prompt[:500],\n                response=result[:500],\n                model_name=f\"{self.model.provider}:{self.model.model_name}\",\n                tokens_input=tokens_in,\n                tokens_output=tokens_out,\n                duration_ms=duration_ms,\n                status=\"completed\",\n            )\n\n            return result\n\n        except Exception as e:\n            db.log(\"ERROR\", f\"LLM-Aufruf fehlgeschlagen: {e}\")\n            return \"{}\"\n\n    def parse_json(self, text: str) -> dict:\n        \"\"\"Extrahiere JSON aus LLM-Antwort.\"\"\"\n        try:\n            # Versuche direkt zu parsen\n            return json.loads(text)\n        except 
json.JSONDecodeError:\n            # Suche nach JSON-Block\n            match = re.search(r\"\\{[\\s\\S]*\\}\", text)\n            if match:\n                try:\n                    return json.loads(match.group())\n                except json.JSONDecodeError:\n                    pass\n        return {}\n"
    }
}
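
Note: the parse_json helper in the file written above first attempts json.loads on the raw model output and, only on failure, falls back to a regex search for a brace-delimited block. A minimal standalone sketch of that fallback technique (the function name and sample input below are illustrative, not taken from the logged module):

import json
import re

def extract_json(text: str) -> dict:
    # Try to parse the whole response as JSON first.
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        # Fall back to the first brace-delimited block in the text.
        match = re.search(r"\{[\s\S]*\}", text)
        if match:
            try:
                return json.loads(match.group())
            except json.JSONDecodeError:
                pass
    return {}

# Example: a reply that wraps the JSON in prose still parses.
print(extract_json('Here is the result: {"topic": "LLM", "score": 3}'))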

Response

{
    "tool_response": {
        "type": "create",
        "filePath": "\/var\/www\/scripts\/pipeline\/knowledge\/llm_service.py",
        "content": "\"\"\"LLM-Service für Wissensextraktion.\"\"\"\nimport json\nimport re\nimport time\n\nimport requests\n\nimport sys\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_API_KEY, OLLAMA_HOST\nfrom db import db\n\nfrom .models import ModelConfig, DEFAULT_MODELS\n\n\nclass LLMService:\n    \"\"\"Service für LLM-Aufrufe.\"\"\"\n\n    def __init__(self, model_config: ModelConfig | None = None):\n        \"\"\"Initialisiere Service mit Modellkonfiguration.\"\"\"\n        self.model = model_config or DEFAULT_MODELS[\"ollama\"]\n        self.anthropic_client = None\n\n        if self.model.provider == \"anthropic\":\n            self._init_anthropic()\n\n    def _init_anthropic(self):\n        \"\"\"Initialisiere Anthropic Client.\"\"\"\n        try:\n            import anthropic\n\n            if ANTHROPIC_API_KEY:\n                self.anthropic_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n        except ImportError:\n            db.log(\"WARNING\", \"Anthropic SDK nicht installiert, fallback zu Ollama\")\n            self.model = DEFAULT_MODELS[\"ollama\"]\n\n    def call_llm(self, prompt: str, json_output: bool = True) -> str:\n        \"\"\"Rufe LLM auf und gib Antwort zurück.\"\"\"\n        start_time = time.time()\n\n        try:\n            if self.model.provider == \"anthropic\" and self.anthropic_client:\n                response = self.anthropic_client.messages.create(\n                    model=self.model.model_name,\n                    max_tokens=self.model.max_tokens,\n                    temperature=self.model.temperature,\n                    messages=[{\"role\": \"user\", \"content\": prompt}],\n                )\n                result = response.content[0].text\n                tokens_in = response.usage.input_tokens\n                tokens_out = response.usage.output_tokens\n            else:\n                # Ollama\n                payload = {\n                    \"model\": self.model.model_name,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"options\": {\"temperature\": self.model.temperature},\n                }\n                if json_output:\n                    payload[\"format\"] = \"json\"\n\n                resp = requests.post(f\"{OLLAMA_HOST}\/api\/generate\", json=payload, timeout=600)\n                resp.raise_for_status()\n                data = resp.json()\n                result = data.get(\"response\", \"\")\n                tokens_in = data.get(\"prompt_eval_count\", 0)\n                tokens_out = data.get(\"eval_count\", 0)\n\n            duration_ms = int((time.time() - start_time) * 1000)\n\n            # Protokolliere LLM-Aufruf\n            db.log_to_protokoll(\n                client_name=\"pipeline-knowledge\",\n                request=prompt[:500],\n                response=result[:500],\n                model_name=f\"{self.model.provider}:{self.model.model_name}\",\n                tokens_input=tokens_in,\n                tokens_output=tokens_out,\n                duration_ms=duration_ms,\n                status=\"completed\",\n            )\n\n            return result\n\n        except Exception as e:\n            db.log(\"ERROR\", f\"LLM-Aufruf fehlgeschlagen: {e}\")\n            return \"{}\"\n\n    def parse_json(self, text: str) -> dict:\n        \"\"\"Extrahiere JSON aus LLM-Antwort.\"\"\"\n        try:\n            # Versuche direkt zu parsen\n            return json.loads(text)\n        except 
json.JSONDecodeError:\n            # Suche nach JSON-Block\n            match = re.search(r\"\\{[\\s\\S]*\\}\", text)\n            if match:\n                try:\n                    return json.loads(match.group())\n                except json.JSONDecodeError:\n                    pass\n        return {}\n",
        "structuredPatch": [],
        "originalFile": null
    }
}
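
Note: the non-Anthropic branch of call_llm in the written file posts to Ollama's /api/generate endpoint with streaming disabled and, when JSON output is requested, format set to "json", then reads response, prompt_eval_count and eval_count from the reply. A minimal standalone request along the same lines (the host and model name below are placeholders, not values from this log):

import requests

OLLAMA_HOST = "http://localhost:11434"  # placeholder; the logged code imports this from config

payload = {
    "model": "llama3",  # placeholder model name
    "prompt": "Reply with a JSON object containing a single key 'ok'.",
    "stream": False,
    "format": "json",  # ask Ollama for JSON-only output
    "options": {"temperature": 0.1},
}

resp = requests.post(f"{OLLAMA_HOST}/api/generate", json=payload, timeout=600)
resp.raise_for_status()
data = resp.json()

print(data.get("response", ""))  # model output
print(data.get("prompt_eval_count", 0), data.get("eval_count", 0))  # input/output token counts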