{
  "event": "PreToolUse",
  "tool_name": "Write",
  "tool_input": {
    "file_path": "/var/www/scripts/pipeline/semantic_chunk/ollama_service.py",
    "content": "\"\"\"\nOllama API Service for LLM communication.\n\"\"\"\n\nimport json\nimport re\nimport sys\n\nimport requests\n\nsys.path.insert(0, \"/var/www/scripts/pipeline\")\n\nfrom config import OLLAMA_HOST\nfrom db import db\n\nANALYSIS_MODEL = \"gemma3:27b-it-qat\"\n\n\nclass OllamaService:\n    \"\"\"Ollama API wrapper - single responsibility: LLM communication.\"\"\"\n\n    def __init__(self, host: str = OLLAMA_HOST, model: str = ANALYSIS_MODEL):\n        self.host = host\n        self.model = model\n\n    def generate(self, prompt: str, json_format: bool = True) -> dict | None:\n        \"\"\"Generate a response from Ollama.\"\"\"\n        try:\n            payload = {\n                \"model\": self.model,\n                \"prompt\": prompt,\n                \"stream\": False,\n                \"options\": {\"temperature\": 0.3, \"num_predict\": 1000},\n            }\n            if json_format:\n                payload[\"format\"] = \"json\"\n\n            response = requests.post(f\"{self.host}/api/generate\", json=payload, timeout=120)\n            response.raise_for_status()\n\n            text = response.json().get(\"response\", \"{}\")\n            if json_format:\n                return self._parse_json(text)\n            return {\"text\": text}\n        except Exception as e:\n            # Broad catch: log any transport or decoding failure, return None to the caller.\n            db.log(\"ERROR\", f\"Ollama error: {e}\")\n            return None\n\n    def _parse_json(self, text: str) -> dict | None:\n        \"\"\"Parse JSON from the model response.\"\"\"\n        try:\n            return json.loads(text)\n        except json.JSONDecodeError:\n            # Fallback: extract the first {...} span embedded in surrounding text.\n            match = re.search(r\"\\{[\\s\\S]*\\}\", text)\n            if match:\n                try:\n                    return json.loads(match.group())\n                except json.JSONDecodeError:\n                    pass\n        return None\n"
  }
}
{
  "tool_response": {
    "type": "create",
    "filePath": "/var/www/scripts/pipeline/semantic_chunk/ollama_service.py",
    "content": "\"\"\"\nOllama API Service for LLM communication.\n\"\"\"\n\nimport json\nimport re\nimport sys\n\nimport requests\n\nsys.path.insert(0, \"/var/www/scripts/pipeline\")\n\nfrom config import OLLAMA_HOST\nfrom db import db\n\nANALYSIS_MODEL = \"gemma3:27b-it-qat\"\n\n\nclass OllamaService:\n    \"\"\"Ollama API wrapper - single responsibility: LLM communication.\"\"\"\n\n    def __init__(self, host: str = OLLAMA_HOST, model: str = ANALYSIS_MODEL):\n        self.host = host\n        self.model = model\n\n    def generate(self, prompt: str, json_format: bool = True) -> dict | None:\n        \"\"\"Generate a response from Ollama.\"\"\"\n        try:\n            payload = {\n                \"model\": self.model,\n                \"prompt\": prompt,\n                \"stream\": False,\n                \"options\": {\"temperature\": 0.3, \"num_predict\": 1000},\n            }\n            if json_format:\n                payload[\"format\"] = \"json\"\n\n            response = requests.post(f\"{self.host}/api/generate\", json=payload, timeout=120)\n            response.raise_for_status()\n\n            text = response.json().get(\"response\", \"{}\")\n            if json_format:\n                return self._parse_json(text)\n            return {\"text\": text}\n        except Exception as e:\n            # Broad catch: log any transport or decoding failure, return None to the caller.\n            db.log(\"ERROR\", f\"Ollama error: {e}\")\n            return None\n\n    def _parse_json(self, text: str) -> dict | None:\n        \"\"\"Parse JSON from the model response.\"\"\"\n        try:\n            return json.loads(text)\n        except json.JSONDecodeError:\n            # Fallback: extract the first {...} span embedded in surrounding text.\n            match = re.search(r\"\\{[\\s\\S]*\\}\", text)\n            if match:\n                try:\n                    return json.loads(match.group())\n                except json.JSONDecodeError:\n                    pass\n        return None\n",
    "structuredPatch": [],
    "originalFile": null
  }
}
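
For reference, a minimal sketch of how the service written above might be called. The sys.path entry, import path, and prompt text are illustrative assumptions, not part of the logged event; only OllamaService, generate(), the json_format flag, and the None-on-error contract come from the created file.

# Hypothetical caller, assuming the semantic_chunk directory is importable.
import sys

sys.path.insert(0, "/var/www/scripts/pipeline/semantic_chunk")

from ollama_service import OllamaService

service = OllamaService()  # defaults from the file: OLLAMA_HOST and gemma3:27b-it-qat
result = service.generate("Return a JSON object with a single key 'ok'.", json_format=True)
if result is None:
    # generate() logs failures via db.log and returns None instead of raising,
    # so callers must check for None explicitly.
    print("Ollama unreachable or response was not parseable JSON")
else:
    print(result)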