{
  "event": "PreToolUse",
  "tool_name": "Read",
  "tool_input": {
    "file_path": "/var/www/scripts/pipeline/chat.py",
    "offset": 1,
    "limit": 25
  }
}
{
  "tool_response": {
    "type": "text",
    "file": {
      "filePath": "/var/www/scripts/pipeline/chat.py",
      "content": "#!/usr/bin/env python3\n\"\"\"\nRAG chat for the AI system.\nSemantic search + LLM answer with source citations.\n\nUsage:\n    python chat.py \"Your question here\"\n    python chat.py                      # Interactive mode\n\"\"\"\n\nimport sys\n\nimport requests\n\nfrom config import ANTHROPIC_API_KEY, ANTHROPIC_MODEL, EMBEDDING_MODEL, OLLAMA_HOST, QDRANT_HOST, QDRANT_PORT\nfrom constants import OLLAMA_TIMEOUT\n\n\ndef get_embedding(text):\n    \"\"\"Get embedding from Ollama.\"\"\"\n    response = requests.post(\n        f\"{OLLAMA_HOST}/api/embeddings\", json={\"model\": EMBEDDING_MODEL, \"prompt\": text}, timeout=OLLAMA_TIMEOUT\n    )\n    response.raise_for_status()\n    return response.json().get(\"embedding\")",
      "numLines": 25,
      "startLine": 1,
      "totalLines": 222
    }
  }
}
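
The read stops at line 25 of 222, so the rest of chat.py is not shown. Judging from the docstring and the imports (QDRANT_HOST/QDRANT_PORT for retrieval, ANTHROPIC_API_KEY/ANTHROPIC_MODEL for answering), the remaining code presumably runs a Qdrant similarity search with the embedding and passes the retrieved chunks to the Anthropic Messages API. A minimal sketch of that continuation follows, using plain requests to match the file's style; the collection name "docs", the payload fields "text" and "source", the timeouts, and both function names are assumptions, not confirmed by the file.

# Hypothetical continuation of chat.py; everything past get_embedding() is
# guessed from the imports, not taken from the unread lines 26-222.
import requests

from config import ANTHROPIC_API_KEY, ANTHROPIC_MODEL, QDRANT_HOST, QDRANT_PORT


def search_chunks(embedding, top_k=5):
    """Similarity search via Qdrant's REST API (collection name assumed)."""
    response = requests.post(
        f"http://{QDRANT_HOST}:{QDRANT_PORT}/collections/docs/points/search",
        json={"vector": embedding, "limit": top_k, "with_payload": True},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()["result"]


def answer(question, hits):
    """Ask the Anthropic Messages API, citing the retrieved chunks as sources."""
    # Payload keys "source" and "text" are assumed names for the stored chunks.
    context = "\n\n".join(
        f"[{hit['payload'].get('source', '?')}] {hit['payload'].get('text', '')}" for hit in hits
    )
    response = requests.post(
        "https://api.anthropic.com/v1/messages",
        headers={
            "x-api-key": ANTHROPIC_API_KEY,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        },
        json={
            "model": ANTHROPIC_MODEL,
            "max_tokens": 1024,
            "messages": [
                {
                    "role": "user",
                    "content": f"Answer using only these sources and cite them:\n\n{context}\n\nQuestion: {question}",
                }
            ],
        },
        timeout=120,
    )
    response.raise_for_status()
    return response.json()["content"][0]["text"]

Wired together, the pipeline would read roughly: print(answer(q, search_chunks(get_embedding(q)))).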