ollama_service.py
- Pfad:
/var/www/scripts/pipeline/semantic_chunk/ollama_service.py
- Namespace: pipeline
- Zeilen: 61 | Größe: 1,709 Bytes
- Geändert: 2025-12-25 14:04:58 | Gescannt: 2025-12-31 10:22:15
Code Hygiene Score: 97
- Dependencies: 90 (25%)
- LOC: 100 (20%)
- Methods: 100 (20%)
- Secrets: 100 (15%)
- Classes: 100 (10%)
- Magic Numbers: 90 (10%)
Issues 1
| Zeile | Typ | Beschreibung |
| 33 | magic_number | Magic Number gefunden: 1000 |
Dependencies 6
- use json
- use re
- use sys
- use requests
- use config.OLLAMA_HOST
- use db.db
Klassen 1
- OllamaService (class), Zeile 19
Code
"""
Ollama API Service for LLM communication.
"""
import json
import re
import sys
import requests
sys.path.insert(0, "/var/www/scripts/pipeline")
from config import OLLAMA_HOST
from db import db
ANALYSIS_MODEL = "gemma3:27b-it-qat"
class OllamaService:
    """Ollama API wrapper — single responsibility: LLM communication."""

    # Named defaults for the generation options (previously inline magic
    # numbers; the hygiene scan flagged 1000). Overridable per call via the
    # keyword-only parameters of ``generate``.
    DEFAULT_TEMPERATURE = 0.3
    DEFAULT_NUM_PREDICT = 1000
    DEFAULT_TIMEOUT = 120  # HTTP request timeout in seconds

    def __init__(self, host: str = OLLAMA_HOST, model: str = ANALYSIS_MODEL):
        """Store the connection target and model name.

        Args:
            host: Base URL of the Ollama server.
            model: Model identifier sent to the ``/api/generate`` endpoint.
        """
        self.host = host
        self.model = model

    def generate(
        self,
        prompt: str,
        json_format: bool = True,
        *,
        temperature: float = DEFAULT_TEMPERATURE,
        num_predict: int = DEFAULT_NUM_PREDICT,
        timeout: int = DEFAULT_TIMEOUT,
    ) -> dict | None:
        """Generate a response from Ollama.

        Args:
            prompt: Prompt text sent to the model.
            json_format: If True, request JSON output from Ollama and parse
                the response into a dict; otherwise return the raw text.
            temperature: Sampling temperature (keyword-only).
            num_predict: Maximum number of tokens to generate (keyword-only).
            timeout: HTTP timeout in seconds (keyword-only).

        Returns:
            The parsed JSON dict when ``json_format`` is True,
            ``{"text": <raw response>}`` otherwise, or ``None`` on any
            request/decode error (errors are logged, never raised).
        """
        try:
            payload = {
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": temperature, "num_predict": num_predict},
            }
            if json_format:
                payload["format"] = "json"
            response = requests.post(f"{self.host}/api/generate", json=payload, timeout=timeout)
            response.raise_for_status()
            text = response.json().get("response", "{}")
            if json_format:
                return self._parse_json(text)
            return {"text": text}
        except (requests.RequestException, ValueError) as e:
            # Narrowed from bare ``except Exception`` so programming errors
            # are no longer silently swallowed. RequestException covers
            # connection/timeout/HTTP-status failures; ValueError covers a
            # non-JSON HTTP body from ``response.json()``.
            db.log("ERROR", f"Ollama error: {e}")
            return None

    def _parse_json(self, text: str) -> dict | None:
        """Parse JSON from the raw model response.

        Falls back to extracting the outermost ``{...}`` span via regex when
        the full text is not valid JSON (models sometimes wrap the JSON
        object in surrounding prose).

        Returns:
            The parsed dict, or ``None`` if no valid JSON object is found.
        """
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            # Greedy match from the first '{' to the last '}': captures an
            # embedded JSON object even with leading/trailing text.
            match = re.search(r"\{[\s\S]*\}", text)
            if match:
                try:
                    return json.loads(match.group())
                except json.JSONDecodeError:
                    pass
        return None