Log Entry #14133

ID: 14133
Timestamp: 2025-12-25 10:17:01.599685
Client: root
IP: 145.224.72.140
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 1,656 (Input: 42, Output: 1,614)
Duration: 85 ms
Request time: 2025-12-25 10:17:01.599685
Response time: 2025-12-25 10:17:01.684913

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
        "limit": 150
    }
}
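The request above is a PreToolUse hook event for the Read tool, carrying the target file path and a 150-line limit. As a reading aid, here is a minimal sketch of a hook handler in PHP; it assumes the event JSON arrives on stdin and that a zero exit code lets the tool call proceed (both are assumptions, not taken from this log), and it only relies on the event, tool_name, and tool_input fields shown above.

<?php

declare(strict_types=1);

// Sketch of a PreToolUse hook handler. Assumptions: the event JSON shown
// above is delivered on stdin, and exit code 0 lets the tool call proceed.
$raw = stream_get_contents(STDIN);
$event = json_decode(is_string($raw) ? $raw : '', true);

if (!is_array($event) || ($event['event'] ?? '') !== 'PreToolUse') {
    exit(0); // Not a PreToolUse event: nothing to inspect.
}

$toolName = (string) ($event['tool_name'] ?? '');
$filePath = (string) ($event['tool_input']['file_path'] ?? '');

// Example policy: note every Read against the dev vhost on stderr.
if ($toolName === 'Read' && str_contains($filePath, 'dev.campus.systemische-tools.de')) {
    fwrite(STDERR, "Read requested for {$filePath}\n");
}

exit(0);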

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
            "content": "<?php\n\ndeclare(strict_types=1);\n\nnamespace Infrastructure\\AI;\n\n\/\/ @responsibility: RAG-Pipeline für KI-Chat (Embedding, Suche, Antwort-Generierung)\n\nuse RuntimeException;\n\nfinal readonly class ChatService\n{\n    \/**\n     * Constructs a new ChatService instance.\n     *\n     * @param OllamaService  $ollama  Ollama service for embeddings and optional LLM\n     * @param QdrantService  $qdrant  Qdrant service for vector search\n     * @param ClaudeService  $claude  Claude service for high-quality LLM responses\n     * @param ScoringService $scoring Scoring service for weighted result ranking\n     *\/\n    public function __construct(\n        private OllamaService $ollama,\n        private QdrantService $qdrant,\n        private ClaudeService $claude,\n        private ScoringService $scoring\n    ) {\n    }\n\n    \/**\n     * Executes a complete RAG chat pipeline.\n     *\n     * Performs the following steps:\n     * 1. Generates an embedding vector for the question (if collections selected)\n     * 2. Searches for similar documents in the vector database(s)\n     * 3. Builds context from the most relevant chunks\n     * 4. Generates an answer using the specified LLM model\n     * 5. Extracts source information\n     * 6. Assembles a structured response\n     *\n     * If no collections are selected, steps 1-3 and 5 are skipped (no RAG).\n     *\n     * @param string        $question           The user's question to answer\n     * @param string        $model              The LLM model (claude-* or ollama:*)\n     * @param array<string> $collections        Qdrant collections to search (empty = no RAG)\n     * @param int           $limit              Maximum number of document chunks to retrieve (default: 5)\n     * @param string|null   $stylePrompt        Optional style prompt from author profile\n     * @param string|null   $customSystemPrompt Optional custom system prompt (replaces default if set)\n     * @param float         $temperature        Sampling temperature 0.0-1.0 (default: 0.7)\n     * @param int           $maxTokens          Maximum tokens in response (default: 4096)\n     *\n     * @return array{\n     *     question: string,\n     *     answer: string,\n     *     sources: array<int, array{title: string, score: float, content?: string}>,\n     *     model: string,\n     *     usage?: array{input_tokens: int, output_tokens: int},\n     *     chunks_used: int\n     * } Complete chat response with answer, sources, and metadata\n     *\n     * @throws RuntimeException If embedding generation fails\n     * @throws RuntimeException If vector search fails\n     * @throws RuntimeException If LLM request fails\n     *\n     * @example\n     * $chat = new ChatService($ollama, $qdrant, $claude);\n     * \/\/ With RAG (multiple collections)\n     * $result = $chat->chat('Was ist systemisches Coaching?', 'claude-opus-4-5-20251101', ['documents', 'mail'], 5);\n     * \/\/ Without RAG (no collections)\n     * $result = $chat->chat('Erkläre mir Python', 'claude-opus-4-5-20251101', [], 5);\n     *\/\n    public function chat(\n        string $question,\n        string $model = 'claude-opus-4-5-20251101',\n        array $collections = [],\n        int $limit = 5,\n        ?string $stylePrompt = null,\n        ?string $customSystemPrompt = null,\n        float $temperature = 0.7,\n        int $maxTokens = 4096\n    ): array {\n        $searchResults = [];\n        $context = '';\n\n        \/\/ Only perform RAG if collections are selected\n        if 
($collections !== []) {\n            \/\/ Step 1: Generate embedding for the question\n            try {\n                $queryEmbedding = $this->ollama->getEmbedding($question);\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Embedding generation failed: ' . $e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n\n            if ($queryEmbedding === []) {\n                throw new RuntimeException('Embedding generation returned empty vector');\n            }\n\n            \/\/ Step 2: Search across all selected collections\n            try {\n                $searchResults = $this->searchMultipleCollections($queryEmbedding, $collections, $limit);\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Vector search failed: ' . $e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n\n            \/\/ Step 3: Build context from search results (if any found)\n            if ($searchResults !== []) {\n                $context = $this->buildContext($searchResults);\n            }\n        }\n\n        \/\/ Step 4: Parse model string and generate answer\n        $isOllama = str_starts_with($model, 'ollama:');\n        $isClaude = str_starts_with($model, 'claude-');\n        $hasContext = $context !== '';\n\n        if ($isClaude) {\n            try {\n                \/\/ Build prompt: RAG with context or direct question\n                if ($hasContext) {\n                    $userPrompt = $this->claude->buildRagPrompt($question, $context);\n                } else {\n                    $userPrompt = $question;\n                }\n\n                \/\/ Build system prompt hierarchy: Default -> Custom -> Style\n                if ($customSystemPrompt !== null && $customSystemPrompt !== '') {\n                    $systemPrompt = $customSystemPrompt;\n                } else {\n                    $systemPrompt = $hasContext\n                        ? $this->claude->getDefaultSystemPrompt()\n                        : 'Du bist ein hilfreicher Assistent. Antworte auf Deutsch, präzise und hilfreich.';\n                }\n\n                \/\/ Append style prompt from author profile if provided\n                if ($stylePrompt !== null && $stylePrompt !== '') {\n                    $systemPrompt .= \"\\n\\n\" . $stylePrompt;\n                }\n\n                $llmResponse = $this->claude->ask($userPrompt, $systemPrompt, $model, $maxTokens, $temperature);\n\n                $answer = $llmResponse['text'];\n                $usage = $llmResponse['usage'];\n            } catch (RuntimeException $e) {",
            "numLines": 150,
            "startLine": 1,
            "totalLines": 359
        }
    }
}
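The logged response returns the first 150 of 359 lines of ChatService.php, which documents the RAG pipeline and then branches on the model string prefix (claude-* vs. ollama:*) before assembling the system prompt (custom prompt if set, otherwise the RAG default or a plain assistant prompt, with an optional style prompt appended). The sketch below isolates that dispatch and prompt hierarchy for readability; the helper function is hypothetical and only mirrors logic visible in the excerpt.

<?php

declare(strict_types=1);

// Hypothetical helper mirroring the dispatch and system prompt hierarchy
// visible in the ChatService::chat() excerpt above; a sketch, not part of
// the logged file.
function resolveBackendAndSystemPrompt(
    string $model,
    string $context,
    ?string $customSystemPrompt,
    ?string $stylePrompt,
    string $defaultRagSystemPrompt
): array {
    // Backend selection by model prefix, as in the excerpt.
    $backend = str_starts_with($model, 'ollama:')
        ? 'ollama'
        : (str_starts_with($model, 'claude-') ? 'claude' : 'unknown');

    // Prompt hierarchy: a non-empty custom prompt wins; otherwise the RAG
    // default (when context exists) or a plain assistant prompt (without).
    if ($customSystemPrompt !== null && $customSystemPrompt !== '') {
        $systemPrompt = $customSystemPrompt;
    } else {
        $systemPrompt = $context !== ''
            ? $defaultRagSystemPrompt
            : 'Du bist ein hilfreicher Assistent. Antworte auf Deutsch, präzise und hilfreich.';
    }

    // A style prompt from the author profile is always appended last.
    if ($stylePrompt !== null && $stylePrompt !== '') {
        $systemPrompt .= "\n\n" . $stylePrompt;
    }

    return ['backend' => $backend, 'system_prompt' => $systemPrompt];
}

// Usage with values resembling the excerpt's defaults:
// resolveBackendAndSystemPrompt('claude-opus-4-5-20251101', $context, null, null, $claude->getDefaultSystemPrompt());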