        $this->syncService = $syncService ?? new ChunkSyncService();
        $this->ollamaService = $ollamaService ?? new OllamaService();
    }

    /**
     * Execute documentation chat.
     *
     * @param string $question User question
     * @param string $model    LLM model to use
     * @param int    $limit    Number of context chunks
     * @return array{answer: string, sources: array}
     */
    public function execute(string $question, string $model = 'mistral', int $limit = 5): array
    {
        // Get relevant chunks via semantic search
        $chunks = $this->syncService->search($question, $limit);

        if (empty($chunks)) {
            return [
                'answer' => 'Leider konnte ich keine relevanten Informationen in der Dokumentation finden.',
                'sources' => [],
            ];
        }

        // Build context from chunks
        $context = $this->buildContext($chunks);

        // Generate answer using LLM
        $prompt = $this->buildPrompt($question, $context);
        $answer = $this->ollamaService->generate($prompt, $model, 0.3);

        // Map sources
        $sources = array_map(static fn (array $chunk): array => [
            'id' => $chunk['doc_id'],
            'path' => $chunk['path'],
            'title' => $chunk['title'],
            'score' => round($chunk['score'], 3),
        ], $chunks);

        return [
            'answer' => $answer,
            'sources' => $sources,
        ];
    }

    /**
     * Build context from chunks.
     */
    private function buildContext(array $chunks): string
    {
        $parts = [];

        foreach ($chunks as $chunk) {
            $part = "## {$chunk['title']}\n";
            $part .= "Pfad: {$chunk['path']}\n";
            $part .= $chunk['content'];
            $parts[] = $part;
        }

        return implode("\n\n---\n\n", $parts);
    }

    /**
     * Build chat prompt.
     */
    private function buildPrompt(string $question, string $context): string
    {
        // NOTE: The heredoc body was truncated in the source; the prompt text
        // below is a plausible reconstruction, not the original wording.
        return <<<PROMPT
        Du bist ein hilfreicher Assistent für die Projektdokumentation.
        Beantworte die folgende Frage ausschließlich anhand des bereitgestellten Kontexts.
        Wenn der Kontext keine Antwort enthält, sage das ehrlich.

        Kontext:
        {$context}

        Frage: {$question}

        Antwort:
        PROMPT;
    }
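
    // --- Usage sketch (illustrative only) ---
    // A minimal example of how this action might be called, assuming the class
    // above is named DocChatAction (its declaration lies outside this excerpt)
    // and that ChunkSyncService and OllamaService are autoloadable:
    //
    //     $chat = new DocChatAction();
    //     $result = $chat->execute('Wie richte ich den Sync ein?', 'mistral', 5);
    //
    //     echo $result['answer'] . "\n";
    //     foreach ($result['sources'] as $source) {
    //         echo "- {$source['title']} ({$source['path']}, Score: {$source['score']})\n";
    //     }
    //
    // The temperature passed to OllamaService::generate() is fixed at 0.3 above,
    // which keeps answers close to the retrieved context rather than creative.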