{
"event": "PreToolUse",
"tool_name": "Read",
"tool_input": {
"file_path": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
"offset": 55,
"limit": 25
}
}
{
"tool_response": {
"type": "text",
"file": {
"filePath": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
"content": " *\n * @param string $question The user's question to answer\n * @param string $model The LLM model (claude-* or ollama:*)\n * @param array<string> $collections Qdrant collections to search (empty = no RAG)\n * @param int $limit Maximum number of document chunks to retrieve (default: 5)\n * @param string|null $stylePrompt Optional style prompt from author profile\n * @param string|null $customSystemPrompt Optional custom system prompt (replaces default if set)\n * @param float $temperature Sampling temperature 0.0-1.0 (default: 0.7)\n * @param int $maxTokens Maximum tokens in response (default: 4096)\n *\n * @return array{\n * question: string,\n * answer: string,\n * sources: array<int, array{title: string, score: float, content?: string}>,\n * model: string,\n * usage?: array{input_tokens: int, output_tokens: int},\n * chunks_used: int\n * } Complete chat response with answer, sources, and metadata\n *\n * @throws RuntimeException If embedding generation fails\n * @throws RuntimeException If vector search fails\n * @throws RuntimeException If LLM request fails\n *\n * @example\n * $chat = new ChatService($ollama, $qdrant, $claude);",
"numLines": 25,
"startLine": 55,
"totalLines": 353
}
}
}
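
The docblock captured in the tool_response above describes the service's main chat entry point: a question and model id go in, RAG retrieval runs against the named Qdrant collections, and a structured response array comes back. The read window ends at the @example line, before the actual method signature, so the sketch below is an assumption-based illustration only: the method name chat(), the named-argument call style (PHP 8+), and the model and collection strings are placeholders, while the parameter names, defaults, and return shape are taken directly from the @param and @return annotations.

<?php
// Hypothetical call site inferred from the docblock. The method name
// `chat()` is NOT confirmed by the excerpt; the read stops before the
// real signature appears. Constructor arguments follow the @example.

$chat = new ChatService($ollama, $qdrant, $claude);

$result = $chat->chat(
    question: 'What is systemic coaching?',
    model: 'claude-3-5-sonnet',   // placeholder; any claude-* or ollama:* id per the docblock
    collections: ['handbook'],    // hypothetical collection name; [] disables RAG
    limit: 5,                     // max document chunks to retrieve (docblock default)
    temperature: 0.7,
    maxTokens: 4096,
    // $stylePrompt and $customSystemPrompt are documented as optional
    // and omitted here, assuming they default to null.
);

// Consuming the return shape declared in the @return annotation:
echo $result['answer'], PHP_EOL;
foreach ($result['sources'] as $source) {
    printf("  %s (score %.2f)\n", $source['title'], $source['score']);
}
echo 'Chunks used: ', $result['chunks_used'], PHP_EOL;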