Protokoll #27122

ID: 27122
Zeitstempel: 2025-12-28 02:28:47.038397
Client: root
IP: 145.224.72.140
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 3,480 (Input: 284, Output: 3,196)
Dauer: 86 ms
Request-Zeit: 2025-12-28 02:28:47.038397
Response-Zeit: 2025-12-28 02:28:47.123913

Request

{
    "event": "PreToolUse",
    "tool_name": "Edit",
    "tool_input": {
        "file_path": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
        "old_string": "    \/**\n     * Builds a context string from search results.\n     *\n     * Concatenates the content from multiple search results into a single\n     * context string, respecting a maximum character limit. Each chunk is\n     * labeled with its source document title.\n     *\n     * Optionally prepends a semantic summary with extracted entities and taxonomy.\n     *\n     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults  Vector search results\n     * @param int                                                                             $maxTokens      Maximum tokens to include (default: 3000)\n     * @param string                                                                          $semanticSummary Optional semantic summary to prepend\n     *\n     * @return string The built context string\n     *\/",
        "new_string": "    \/** Builds context string from search results (respects maxTokens limit). *\/"
    }
}

Response

{
    "tool_response": {
        "filePath": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/AI\/ChatService.php",
        "oldString": "    \/**\n     * Builds a context string from search results.\n     *\n     * Concatenates the content from multiple search results into a single\n     * context string, respecting a maximum character limit. Each chunk is\n     * labeled with its source document title.\n     *\n     * Optionally prepends a semantic summary with extracted entities and taxonomy.\n     *\n     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults  Vector search results\n     * @param int                                                                             $maxTokens      Maximum tokens to include (default: 3000)\n     * @param string                                                                          $semanticSummary Optional semantic summary to prepend\n     *\n     * @return string The built context string\n     *\/",
        "newString": "    \/** Builds context string from search results (respects maxTokens limit). *\/",
        "originalFile": "<?php\n\ndeclare(strict_types=1);\n\nnamespace Infrastructure\\AI;\n\n\/\/ @responsibility: RAG-Pipeline für KI-Chat (Embedding, Suche, Antwort-Generierung)\n\nuse RuntimeException;\n\nfinal readonly class ChatService\n{\n    public function __construct(\n        private OllamaService $ollama,\n        private QdrantService $qdrant,\n        private ClaudeService $claude,\n        private ScoringService $scoring,\n        private ?SemanticEnrichmentService $semantic = null\n    ) {\n    }\n\n    \/**\n     * Executes a complete RAG chat pipeline: Embedding → Search → Context → LLM → Response\n     * If no collections selected, skips RAG steps (direct LLM query).\n     *\n     * @param array<string> $collections Qdrant collections to search (empty = no RAG)\n     * @return array{question: string, answer: string, sources: array, model: string, usage?: array, chunks_used: int}\n     * @throws RuntimeException If embedding\/search\/LLM fails\n     *\/\n    public function chat(\n        string $question,\n        string $model = 'claude-opus-4-5-20251101',\n        array $collections = [],\n        int $limit = 5,\n        ?string $stylePrompt = null,\n        ?string $customSystemPrompt = null,\n        float $temperature = 0.7,\n        int $maxTokens = 4096\n    ): array {\n        $searchResults = [];\n        $context = '';\n\n        \/\/ Only perform RAG if collections are selected\n        if ($collections !== []) {\n            \/\/ Step 1: Generate embedding for the question\n            try {\n                $queryEmbedding = $this->ollama->getEmbedding($question);\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Embedding generation failed: ' . 
$e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n\n            if ($queryEmbedding === []) {\n                throw new RuntimeException('Embedding generation returned empty vector');\n            }\n\n            \/\/ Step 2: Search across all selected collections\n            try {\n                $searchResults = $this->searchMultipleCollections($queryEmbedding, $collections, $limit);\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Vector search failed: ' . $e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n\n            \/\/ Step 2b: Semantic enrichment (graceful degradation - works without)\n            $semanticSummary = '';\n            if ($searchResults !== [] && $this->semantic !== null) {\n                $searchResults = $this->semantic->enrichSearchResults($searchResults);\n                $semanticSummary = $this->semantic->buildSemanticSummary($searchResults);\n            }\n\n            \/\/ Step 3: Build context from search results (if any found)\n            if ($searchResults !== []) {\n                $context = $this->buildContext($searchResults, 3000, $semanticSummary);\n            }\n        }\n\n        \/\/ Step 4: Parse model string and generate answer\n        $isOllama = str_starts_with($model, 'ollama:');\n        $isClaude = str_starts_with($model, 'claude-');\n        $hasContext = $context !== '';\n\n        if ($isClaude) {\n            try {\n                \/\/ Build prompt: RAG with context or direct question\n                if ($hasContext) {\n                    $userPrompt = $this->claude->buildRagPrompt($question, $context);\n                } else {\n                    $userPrompt = $question;\n                }\n\n                \/\/ Build system prompt hierarchy: Default -> Custom -> Style\n                if ($customSystemPrompt !== null && 
$customSystemPrompt !== '') {\n                    $systemPrompt = $customSystemPrompt;\n                } else {\n                    $systemPrompt = $hasContext\n                        ? $this->claude->getDefaultSystemPrompt()\n                        : 'Du bist ein hilfreicher Assistent. Antworte auf Deutsch, präzise und hilfreich.';\n                }\n\n                \/\/ Append style prompt from author profile if provided\n                if ($stylePrompt !== null && $stylePrompt !== '') {\n                    $systemPrompt .= \"\\n\\n\" . $stylePrompt;\n                }\n\n                $llmResponse = $this->claude->ask($userPrompt, $systemPrompt, $model, $maxTokens, $temperature);\n\n                $answer = $llmResponse['text'];\n                $usage = $llmResponse['usage'];\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Claude API request failed: ' . $e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n        } elseif ($isOllama) {\n            try {\n                \/\/ Extract actual model name (remove \"ollama:\" prefix)\n                $ollamaModel = substr($model, 7);\n\n                \/\/ Build instruction from custom prompt and style\n                $instructions = [];\n                if ($customSystemPrompt !== null && $customSystemPrompt !== '') {\n                    $instructions[] = $customSystemPrompt;\n                }\n                if ($stylePrompt !== null && $stylePrompt !== '') {\n                    $instructions[] = $stylePrompt;\n                }\n                $instructionBlock = $instructions !== [] ? implode(\"\\n\\n\", $instructions) . 
\"\\n\\n\" : '';\n\n                \/\/ Build prompt: RAG with context or direct question\n                if ($hasContext) {\n                    $userPrompt = sprintf(\n                        \"%sKontext aus den Dokumenten:\\n\\n%s\\n\\n---\\n\\nFrage: %s\",\n                        $instructionBlock,\n                        $context,\n                        $question\n                    );\n                } else {\n                    $userPrompt = $instructionBlock . $question;\n                }\n\n                $answer = $this->ollama->generate($userPrompt, $ollamaModel, $temperature);\n                $usage = null;\n            } catch (RuntimeException $e) {\n                throw new RuntimeException(\n                    'Ollama generation failed: ' . $e->getMessage(),\n                    0,\n                    $e\n                );\n            }\n        } else {\n            throw new RuntimeException(\n                sprintf('Unknown model \"%s\". Use claude-* or ollama:* format.', $model)\n            );\n        }\n\n        \/\/ Step 5: Extract source information\n        $sources = $this->extractSources($searchResults);\n\n        \/\/ Step 6: Assemble response\n        $response = [\n            'question' => $question,\n            'answer' => $answer,\n            'sources' => $sources,\n            'model' => $model,\n            'chunks_used' => count($searchResults),\n        ];\n\n        if ($usage !== null) {\n            $response['usage'] = $usage;\n        }\n\n        return $response;\n    }\n\n    \/**\n     * Builds a context string from search results.\n     *\n     * Concatenates the content from multiple search results into a single\n     * context string, respecting a maximum character limit. 
Each chunk is\n     * labeled with its source document title.\n     *\n     * Optionally prepends a semantic summary with extracted entities and taxonomy.\n     *\n     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults  Vector search results\n     * @param int                                                                             $maxTokens      Maximum tokens to include (default: 3000)\n     * @param string                                                                          $semanticSummary Optional semantic summary to prepend\n     *\n     * @return string The built context string\n     *\/\n    private function buildContext(array $searchResults, int $maxTokens = 3000, string $semanticSummary = ''): string\n    {\n        $contextParts = [];\n        $totalChars = 0;\n        $maxChars = $maxTokens * 4; \/\/ Approximate: 1 token ~ 4 characters\n\n        \/\/ Prepend semantic summary if available\n        if ($semanticSummary !== '') {\n            $contextParts[] = \"[Semantischer Kontext]\\n\" . $semanticSummary;\n            $totalChars += strlen($semanticSummary);\n        }\n\n        foreach ($searchResults as $index => $result) {\n            $payload = $result['payload'];\n            \/\/ Support both payload schemas: documents + dokumentation_chunks\n            $content = (string) ($payload['content'] ?? $payload['content_preview'] ?? '');\n            $docTitle = (string) ($payload['document_title'] ?? $payload['title'] ?? 'Unbekannt');\n\n            \/\/ Check if adding this chunk would exceed the limit\n            if ($totalChars + strlen($content) > $maxChars) {\n                break;\n            }\n\n            \/\/ Build chunk header with optional entity info\n            $entities = $payload['entities'] ?? 
[];\n            $entityInfo = '';\n            if ($entities !== []) {\n                $entityNames = array_slice(array_column($entities, 'name'), 0, 3);\n                $entityInfo = ' | Enthält: ' . implode(', ', $entityNames);\n            }\n\n            $contextParts[] = sprintf('[Quelle %d: %s%s]%s%s', $index + 1, $docTitle, $entityInfo, \"\\n\", $content);\n            $totalChars += strlen($content);\n        }\n\n        return implode(\"\\n\\n---\\n\\n\", $contextParts);\n    }\n\n    \/**\n     * Searches across multiple Qdrant collections and merges results.\n     *\n     * Queries each collection with the same embedding vector, then merges\n     * and sorts results by score. Returns the top N results across all collections.\n     *\n     * @param array<int, float> $embedding   The query embedding vector\n     * @param array<string>     $collections Collections to search\n     * @param int               $limit       Maximum total results to return\n     *\n     * @return array<int, array{id: int|string, score: float, payload: array<string, mixed>}> Merged search results\n     *\/\n    private function searchMultipleCollections(array $embedding, array $collections, int $limit): array\n    {\n        $allResults = [];\n\n        foreach... [TRUNCATED-43f6b6ea715e5fbc]",
        "structuredPatch": [
            {
                "oldStart": 181,
                "oldLines": 21,
                "newStart": 181,
                "newLines": 7,
                "lines": [
                    "         return $response;",
                    "     }",
                    " ",
                    "-    \/**",
                    "-     * Builds a context string from search results.",
                    "-     *",
                    "-     * Concatenates the content from multiple search results into a single",
                    "-     * context string, respecting a maximum character limit. Each chunk is",
                    "-     * labeled with its source document title.",
                    "-     *",
                    "-     * Optionally prepends a semantic summary with extracted entities and taxonomy.",
                    "-     *",
                    "-     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults  Vector search results",
                    "-     * @param int                                                                             $maxTokens      Maximum tokens to include (default: 3000)",
                    "-     * @param string                                                                          $semanticSummary Optional semantic summary to prepend",
                    "-     *",
                    "-     * @return string The built context string",
                    "-     *\/",
                    "+    \/** Builds context string from search results (respects maxTokens limit). *\/",
                    "     private function buildContext(array $searchResults, int $maxTokens = 3000, string $semanticSummary = ''): string",
                    "     {",
                    "         $contextParts = [];"
                ]
            }
        ],
        "userModified": false,
        "replaceAll": false
    }
}
← Vorheriger Zur Liste Nächster →