Log #1065
| ID | 1065 |
|---|---|
| Timestamp | 2025-12-20 18:08:26.864557 |
| Client | root |
| IP | - |
| Model | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 2,671 (Input: 0, Output: 0) |
| Duration | 88 ms |
| Request Time | - |
| Response Time | - |
Request
```json
{
  "event": "PreToolUse",
  "tool_name": "Read",
  "tool_input": {
    "file_path": "/var/www/dev.campus.systemische-tools.de/src/Infrastructure/AI/ChatService.php"
  }
}
```
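This request is a `PreToolUse` hook event: before the `Read` tool executes, the agent reports which file it is about to open. As a rough sketch of how a hook consumer could inspect such a payload, the following PHP assumes the event arrives as JSON on stdin and uses a hypothetical exit-code convention to veto the read; neither assumption comes from this log.

```php
<?php

declare(strict_types=1);

// Sketch of a PreToolUse consumer. Assumption: the event payload
// (shaped like the request above) is delivered as JSON on stdin.
$raw = (string) file_get_contents('php://stdin');
$event = json_decode($raw, true, 512, JSON_THROW_ON_ERROR);

if (($event['event'] ?? '') === 'PreToolUse' && ($event['tool_name'] ?? '') === 'Read') {
    $path = (string) ($event['tool_input']['file_path'] ?? '');

    // Hypothetical policy: only allow reads inside the project root.
    if (!str_starts_with($path, '/var/www/dev.campus.systemische-tools.de/')) {
        fwrite(STDERR, "Blocked Read outside project root: {$path}\n");
        exit(2); // Assumed convention: non-zero exit vetoes the tool call.
    }
}

exit(0); // Allow the tool call.
```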
Response

```json
{
  "tool_response": {
    "type": "text",
    "file": {
      "filePath": "/var/www/dev.campus.systemische-tools.de/src/Infrastructure/AI/ChatService.php",
      "numLines": 294,
      "startLine": 1,
      "totalLines": 294
    }
  }
}
```

The `file.content` string (omitted above) decodes to the following PHP source:

```php
<?php

declare(strict_types=1);

namespace Infrastructure\AI;

use RuntimeException;

/**
 * RAG (Retrieval-Augmented Generation) Chat Service.
 *
 * Provides a complete RAG pipeline that:
 * 1. Converts questions to embeddings using Ollama
 * 2. Searches for relevant document chunks in Qdrant
 * 3. Builds context from search results
 * 4. Generates answers using Claude or Ollama
 * 5. Returns structured responses with sources and metadata
 *
 * This service orchestrates the interaction between OllamaService,
 * QdrantService, and ClaudeService to implement a production-ready
 * RAG system for document-based question answering.
 *
 * @package Infrastructure\AI
 * @author System Generated
 * @version 1.0.0
 */
final readonly class ChatService
{
    /**
     * Constructs a new ChatService instance.
     *
     * @param OllamaService $ollama Ollama service for embeddings and optional LLM
     * @param QdrantService $qdrant Qdrant service for vector search
     * @param ClaudeService $claude Claude service for high-quality LLM responses
     */
    public function __construct(
        private OllamaService $ollama,
        private QdrantService $qdrant,
        private ClaudeService $claude
    ) {
    }

    /**
     * Executes a complete RAG chat pipeline.
     *
     * Performs the following steps:
     * 1. Generates an embedding vector for the question
     * 2. Searches for similar documents in the vector database
     * 3. Builds context from the most relevant chunks
     * 4. Generates an answer using the specified LLM model
     * 5. Extracts source information
     * 6. Assembles a structured response
     *
     * @param string $question The user's question to answer
     * @param string $model The LLM model (claude-* or ollama:*)
     * @param string $collection The Qdrant collection to search in (default: documents)
     * @param int $limit Maximum number of document chunks to retrieve (default: 5)
     * @param string|null $stylePrompt Optional style prompt from author profile
     * @param string|null $customSystemPrompt Optional custom system prompt (replaces default if set)
     *
     * @return array{
     *     question: string,
     *     answer: string,
     *     sources: array<int, array{title: string, score: float, content?: string}>,
     *     model: string,
     *     usage?: array{input_tokens: int, output_tokens: int},
     *     chunks_used: int
     * } Complete chat response with answer, sources, and metadata
     *
     * @throws RuntimeException If embedding generation fails
     * @throws RuntimeException If vector search fails
     * @throws RuntimeException If no relevant documents are found
     * @throws RuntimeException If LLM request fails
     *
     * @example
     * $chat = new ChatService($ollama, $qdrant, $claude);
     * $result = $chat->chat('Was ist systemisches Coaching?', 'claude-opus-4-5-20251101', 'documents', 5);
     * // Returns: [
     * //     'question' => 'Was ist systemisches Coaching?',
     * //     'answer' => 'Systemisches Coaching ist...',
     * //     'sources' => [
     * //         ['title' => 'Coaching Grundlagen', 'score' => 0.89],
     * //         ['title' => 'Systemische Methoden', 'score' => 0.76]
     * //     ],
     * //     'model' => 'claude-opus-4-5-20251101',
     * //     'usage' => ['input_tokens' => 234, 'output_tokens' => 567],
     * //     'chunks_used' => 5
     * // ]
     */
    public function chat(
        string $question,
        string $model = 'claude-opus-4-5-20251101',
        string $collection = 'documents',
        int $limit = 5,
        ?string $stylePrompt = null,
        ?string $customSystemPrompt = null
    ): array {
        // Step 1: Generate embedding for the question
        try {
            $queryEmbedding = $this->ollama->getEmbedding($question);
        } catch (RuntimeException $e) {
            throw new RuntimeException(
                'Embedding generation failed: ' . $e->getMessage(),
                0,
                $e
            );
        }

        if ($queryEmbedding === []) {
            throw new RuntimeException('Embedding generation returned empty vector');
        }

        // Step 2: Search for relevant document chunks
        try {
            $searchResults = $this->qdrant->search($queryEmbedding, $collection, $limit);
        } catch (RuntimeException $e) {
            throw new RuntimeException(
                'Vector search failed: ' . $e->getMessage(),
                0,
                $e
            );
        }

        if ($searchResults === []) {
            throw new RuntimeException('No relevant documents found for the question');
        }

        // Step 3: Build context from search results
        $context = $this->buildContext($searchResults);

        // Step 4: Parse model string and generate answer
        $isOllama = str_starts_with($model, 'ollama:');
        $isClaude = str_starts_with($model, 'claude-');

        if ($isClaude) {
            try {
                $ragPrompt = $this->claude->buildRagPrompt($question, $context);

                // Build system prompt hierarchy: Default -> Custom -> Style
                if ($customSystemPrompt !== null && $customSystemPrompt !== '') {
                    $systemPrompt = $customSystemPrompt;
                } else {
                    $systemPrompt = $this->claude->getDefaultSystemPrompt();
                }

                // Append style prompt from author profile if provided
                if ($stylePrompt !== null && $stylePrompt !== '') {
                    $systemPrompt .= "\n\n" . $stylePrompt;
                }

                $llmResponse = $this->claude->ask($ragPrompt, $systemPrompt, $model);

                $answer = $llmResponse['text'];
                $usage = $llmResponse['usage'];
            } catch (RuntimeException $e) {
                throw new RuntimeException(
                    'Claude API request failed: ' . $e->getMessage(),
                    0,
                    $e
                );
            }
        } elseif ($isOllama) {
            try {
                // Extract actual model name (remove "ollama:" prefix)
                $ollamaModel = substr($model, 7);

                // Build instruction from custom prompt and style
                $instructions = [];
                if ($customSystemPrompt !== null && $customSystemPrompt !== '') {
                    $instructions[] = $customSystemPrompt;
                }
                if ($stylePrompt !== null && $stylePrompt !== '') {
                    $instructions[] = $stylePrompt;
                }
                $instructionBlock = $instructions !== [] ? implode("\n\n", $instructions) . "\n\n" : '';

                $ragPrompt = sprintf(
                    "%sKontext aus den Dokumenten:\n\n%s\n\n---\n\nFrage: %s",
                    $instructionBlock,
                    $context,
                    $question
                );
                $answer = $this->ollama->generate($ragPrompt, $ollamaModel);
                $usage = null;
            } catch (RuntimeException $e) {
                throw new RuntimeException(
                    'Ollama generation failed: ' . $e->getMessage(),
                    0,
                    $e
                );
            }
        } else {
            throw new RuntimeException(
                sprintf('Unknown model "%s". Use claude-* or ollama:* format.', $model)
            );
        }

        // Step 5: Extract source information
        $sources = $this->extractSources($searchResults);

        // Step 6: Assemble response
        $response = [
            'question' => $question,
            'answer' => $answer,
            'sources' => $sources,
            'model' => $model,
            'chunks_used' => count($searchResults),
        ];

        if ($usage !== null) {
            $response['usage'] = $usage;
        }

        return $response;
    }

    /**
     * Builds a context string from search results.
     *
     * Concatenates the content from multiple search results into a single
     * context string, respecting a maximum character limit. Each chunk is
     * labeled with its source document title.
     *
     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults Vector search results
     * @param int $maxTokens Maximum tokens to include (default: 3000)
     *
     * @return string The built context string
     */
    private function buildContext(array $searchResults, int $maxTokens = 3000): string
    {
        $contextParts = [];
        $totalChars = 0;
        $maxChars = $maxTokens * 4; // Approximate: 1 token ~ 4 characters

        foreach ($searchResults as $index => $result) {
            $payload = $result['payload'];
            $content = (string) ($payload['content'] ?? '');
            $docTitle = (string) ($payload['document_title'] ?? 'Unbekannt');

            // Check if adding this chunk would exceed the limit
            if ($totalChars + strlen($content) > $maxChars) {
                break;
            }

            $contextParts[] = sprintf('[Quelle %d: %s]%s%s', $index + 1, $docTitle, "\n", $content);
            $totalChars += strlen($content);
        }

        return implode("\n\n---\n\n", $contextParts);
    }

    /**
     * Extracts unique source information from search results.
     *
     * Collects document titles and scores from the search results,
     * deduplicating by title to provide a clean list of sources.
     * Optionally includes content preview if available.
     *
     * @param array<int, array{id: int|string, score: float, payload: array<string, mixed>}> $searchResults Vector search results
     *
     * @return array<int, array{title: string, score: float, content?: string}> Deduplicated source information
     ... [TRUNCATED-7f98530b5698d5a8]
```
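A note on the context budget: `buildContext()` caps context by characters, converting the token budget with the rough rule 1 token ≈ 4 characters (3,000 tokens → 12,000 characters) and stopping at the first chunk that would overflow. A self-contained sketch of that heuristic, using invented sample chunks:

```php
<?php

declare(strict_types=1);

// Standalone illustration of the character-budget heuristic from
// ChatService::buildContext(): convert the token budget to characters
// (1 token ~ 4 characters) and append chunks until the next one would
// exceed it. The chunks below are invented sample data.
$maxTokens = 3000;
$maxChars = $maxTokens * 4; // 12000 characters

$chunks = [
    ['document_title' => 'Coaching Grundlagen',  'content' => str_repeat('A', 5000)],
    ['document_title' => 'Systemische Methoden', 'content' => str_repeat('B', 6000)],
    ['document_title' => 'Fallbeispiele',        'content' => str_repeat('C', 2000)],
];

$parts = [];
$total = 0;

foreach ($chunks as $index => $chunk) {
    // Stop at the first chunk that would overflow the budget:
    // 5000 + 6000 fits, but adding 2000 more would exceed 12000.
    if ($total + strlen($chunk['content']) > $maxChars) {
        break;
    }
    $parts[] = sprintf("[Quelle %d: %s]\n%s", $index + 1, $chunk['document_title'], $chunk['content']);
    $total += strlen($chunk['content']);
}

echo sprintf("Chunks kept: %d, characters used: %d of %d\n", count($parts), $total, $maxChars);
```

Because the loop breaks rather than skips, one oversized early chunk leaves all later chunks unused; worth keeping in mind when tuning the `$limit` parameter.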