Log Entry #8189

ID: 8189
Timestamp: 2025-12-23 09:15:16.622328
Client: root
IP: 145.224.96.190
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 824 (Input: 44, Output: 780)
Duration: 101 ms
Request time: 2025-12-23 09:15:16.622328
Response time: 2025-12-23 09:15:16.722830

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/var\/www\/dev.campus.systemische-tools.de\/src\/Infrastructure\/Persistence\/DokumentationRepository.php"
    }
}
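
The request is a PreToolUse hook payload: the event name, the tool about to run, and that tool's input. As a minimal sketch, a logging endpoint could decode and validate such a payload in PHP before persisting it; the function name and the required-field check below are illustrative assumptions, not part of the logged system:

<?php

declare(strict_types=1);

// Hypothetical helper (not from the logged system): decode a hook payload
// and ensure the fields seen in this log entry are present.
function decodeHookPayload(string $json): array
{
    // JSON_THROW_ON_ERROR turns malformed JSON into an exception (PHP 7.3+).
    $payload = json_decode($json, true, 512, JSON_THROW_ON_ERROR);

    foreach (['event', 'tool_name', 'tool_input'] as $field) {
        if (!array_key_exists($field, $payload)) {
            throw new InvalidArgumentException("Missing hook field: {$field}");
        }
    }

    return $payload;
}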

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "/var/www/dev.campus.systemische-tools.de/src/UseCases/Docs/DocumentationChatUseCase.php",
            "content": "<?php ... (decoded below)",
            "numLines": 100,
            "startLine": 1,
            "totalLines": 100
        }
    }
}

Decoded content of DocumentationChatUseCase.php:

<?php

declare(strict_types=1);

namespace UseCases\Docs;

// @responsibility: RAG-basierter Chat über Dokumentation

use Infrastructure\AI\OllamaService;
use Infrastructure\Docs\ChunkSyncService;

class DocumentationChatUseCase
{
    public function __construct(
        private ChunkSyncService $syncService,
        private OllamaService $ollamaService
    ) {
    }

    /**
     * Execute documentation chat.
     *
     * @param string $question User question
     * @param string $model LLM model to use
     * @param int $limit Number of context chunks
     * @return array{answer: string, sources: array<int, array{id: int, path: string, title: string, score: float}>}
     */
    public function execute(string $question, string $model = 'mistral', int $limit = 5): array
    {
        // Get relevant chunks via semantic search
        $chunks = $this->syncService->search($question, $limit);

        if (empty($chunks)) {
            return [
                'answer' => 'Leider konnte ich keine relevanten Informationen in der Dokumentation finden.',
                'sources' => [],
            ];
        }

        // Build context from chunks
        $context = $this->buildContext($chunks);

        // Generate answer using LLM
        $prompt = $this->buildPrompt($question, $context);
        $answer = $this->ollamaService->generate($prompt, $model, 0.3);

        // Map sources
        $sources = array_map(static fn (array $chunk): array => [
            'id' => $chunk['doc_id'],
            'path' => $chunk['path'],
            'title' => $chunk['title'],
            'score' => round($chunk['score'], 3),
        ], $chunks);

        return [
            'answer' => $answer,
            'sources' => $sources,
        ];
    }

    /**
     * Build context from chunks.
     */
    private function buildContext(array $chunks): string
    {
        $parts = [];

        foreach ($chunks as $chunk) {
            $part = "## {$chunk['title']}\n";
            $part .= "Pfad: {$chunk['path']}\n";
            $part .= $chunk['content'];
            $parts[] = $part;
        }

        return implode("\n\n---\n\n", $parts);
    }

    /**
     * Build chat prompt.
     */
    private function buildPrompt(string $question, string $context): string
    {
        return <<<PROMPT
            Du bist ein Dokumentations-Assistent. Beantworte die Frage basierend auf dem bereitgestellten Kontext.

            KONTEXT:
            {$context}

            FRAGE:
            {$question}

            ANLEITUNG:
            - Antworte auf Deutsch
            - Sei präzise und hilfreich
            - Wenn der Kontext die Frage nicht beantwortet, sage das ehrlich
            - Verweise auf die relevanten Abschnitte der Dokumentation
            PROMPT;
    }
}
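
The returned class implements a retrieval-augmented chat over the documentation: semantic search via ChunkSyncService, context assembly, then an LLM call through OllamaService. A minimal usage sketch follows, assuming both services can be constructed; their constructor arguments are not visible in this log and are elided:

<?php

declare(strict_types=1);

use Infrastructure\AI\OllamaService;
use Infrastructure\Docs\ChunkSyncService;
use UseCases\Docs\DocumentationChatUseCase;

// Hypothetical wiring; in the real application both services would
// presumably come from a container, with arguments not shown in this log.
$useCase = new DocumentationChatUseCase(
    new ChunkSyncService(/* ... */),
    new OllamaService(/* ... */)
);

// Ask a question against the indexed documentation; model and chunk limit
// fall back to the defaults in the class signature ('mistral', 5).
$result = $useCase->execute('Wie wird der Dokumentations-Chat aufgebaut?');

echo $result['answer'], PHP_EOL;
foreach ($result['sources'] as $source) {
    echo "- {$source['path']} (score: {$source['score']})", PHP_EOL;
}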