Log Entry #32303

ID: 32303
Timestamp: 2026-01-02 03:46:23.632427
Client: root
IP: 145.224.72.245
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 5,051 (Input: 2,523, Output: 2,528)
Duration: 96 ms
Request time: 2026-01-02 03:46:23.632427
Response time: 2026-01-02 03:46:23.728622

Request

{
    "event": "PreToolUse",
    "tool_name": "Write",
    "tool_input": {
        "file_path": "\/var\/www\/migration\/content-pipeline\/src\/llm_interface.py",
        "content": "\"\"\"\nLLM Provider Interface - Abstract base for LLM providers.\n\nSOLID Principles:\n- Interface Segregation: Separate interfaces for chat, embedding, vision\n- Dependency Inversion: Depend on abstractions, not concrete implementations\n- Open\/Closed: New providers can be added without modifying existing code\n\"\"\"\n\nimport os\nimport time\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Protocol, runtime_checkable\n\nimport requests\n\nfrom config import (\n    ANTHROPIC_API_KEY,\n    ANTHROPIC_MODEL,\n    OLLAMA_CHAT_MODEL,\n    OLLAMA_HOST,\n)\nfrom constants import LLM_TIMEOUT, MS_PER_SECOND, OLLAMA_TIMEOUT\nfrom enums import LLMProvider\n\n\n@dataclass\nclass LLMResponse:\n    \"\"\"Standardized LLM response.\"\"\"\n\n    text: str\n    model: str\n    provider: LLMProvider\n    tokens_input: int = 0\n    tokens_output: int = 0\n    duration_ms: int = 0\n    success: bool = True\n    error: str | None = None\n\n\n@runtime_checkable\nclass ChatProvider(Protocol):\n    \"\"\"Protocol for chat-capable LLM providers.\"\"\"\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat message and get response.\"\"\"\n        ...\n\n    @property\n    def model_name(self) -> str:\n        \"\"\"Return the model identifier.\"\"\"\n        ...\n\n\n@runtime_checkable\nclass EmbeddingProvider(Protocol):\n    \"\"\"Protocol for embedding-capable providers.\"\"\"\n\n    def embed(self, text: str) -> list[float]:\n        \"\"\"Generate embedding vector for text.\"\"\"\n        ...\n\n    @property\n    def dimension(self) -> int:\n        \"\"\"Return embedding dimension.\"\"\"\n        ...\n\n\nclass BaseLLMProvider(ABC):\n    \"\"\"Abstract base class for LLM providers.\"\"\"\n\n    @property\n    @abstractmethod\n    def provider_type(self) -> LLMProvider:\n        \"\"\"Return provider type enum.\"\"\"\n        pass\n\n    @property\n    @abstractmethod\n    def model_name(self) -> str:\n        \"\"\"Return model identifier.\"\"\"\n        pass\n\n    @abstractmethod\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat message and get response.\"\"\"\n        pass\n\n\nclass OllamaProvider(BaseLLMProvider):\n    \"\"\"Ollama LLM provider implementation.\"\"\"\n\n    def __init__(\n        self,\n        model: str | None = None,\n        host: str | None = None,\n        timeout: int = OLLAMA_TIMEOUT,\n    ):\n        self._model = model or OLLAMA_CHAT_MODEL\n        self._host = host or OLLAMA_HOST\n        self._timeout = timeout\n\n    @property\n    def provider_type(self) -> LLMProvider:\n        return LLMProvider.OLLAMA\n\n    @property\n    def model_name(self) -> str:\n        return f\"ollama:{self._model}\"\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Ollama and get response.\"\"\"\n        start_time = time.time()\n\n        try:\n            response = requests.post(\n                f\"{self._host}\/api\/generate\",\n                json={\n                    \"model\": self._model,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"options\": {\"num_predict\": max_tokens},\n                },\n                timeout=self._timeout,\n            )\n            response.raise_for_status()\n            data = response.json()\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            
return LLMResponse(\n                text=data.get(\"response\", \"\"),\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=data.get(\"prompt_eval_count\", 0),\n                tokens_output=data.get(\"eval_count\", 0),\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n    def chat_json(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Ollama with JSON format enforcement.\"\"\"\n        start_time = time.time()\n\n        try:\n            response = requests.post(\n                f\"{self._host}\/api\/generate\",\n                json={\n                    \"model\": self._model,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"format\": \"json\",\n                    \"options\": {\"num_predict\": max_tokens},\n                },\n                timeout=self._timeout,\n            )\n            response.raise_for_status()\n            data = response.json()\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            return LLMResponse(\n                text=data.get(\"response\", \"\"),\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=data.get(\"prompt_eval_count\", 0),\n                tokens_output=data.get(\"eval_count\", 0),\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n\nclass AnthropicProvider(BaseLLMProvider):\n    \"\"\"Anthropic Claude LLM provider implementation.\"\"\"\n\n    def __init__(\n        self,\n        model: str | None = None,\n        api_key: str | None = None,\n        timeout: int = LLM_TIMEOUT,\n    ):\n        self._model = model or ANTHROPIC_MODEL\n        self._api_key = api_key or ANTHROPIC_API_KEY\n        self._timeout = timeout\n        self._client = None\n\n    def _get_client(self):\n        \"\"\"Lazy-load Anthropic client.\"\"\"\n        if self._client is None:\n            try:\n                import anthropic\n\n                self._client = anthropic.Anthropic(api_key=self._api_key)\n            except ImportError:\n                raise ImportError(\"anthropic package not installed\")\n        return self._client\n\n    @property\n    def provider_type(self) -> LLMProvider:\n        return LLMProvider.ANTHROPIC\n\n    @property\n    def model_name(self) -> str:\n        return self._model\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Anthropic and get response.\"\"\"\n        start_time = time.time()\n\n        try:\n            client = self._get_client()\n            message = client.messages.create(\n                model=self._model,\n                
max_tokens=max_tokens,\n                messages=[{\"role\": \"user\", \"content\": prompt}],\n            )\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            return LLMResponse(\n                text=message.content[0].text,\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=message.usage.input_tokens,\n                tokens_output=message.usage.output_tokens,\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n\nclass LLMFactory:\n    \"\"\"Factory for creating LLM provider instances.\n\n    SOLID: Factory pattern for creating providers without\n    coupling to concrete implementations.\n    \"\"\"\n\n    _providers: dict[LLMProvider, type[BaseLLMProvider]] = {\n        LLMProvider.OLLAMA: OllamaProvider,\n        LLMProvider.ANTHROPIC: AnthropicProvider,\n    }\n\n    @classmethod\n    def create(\n        cls,\n        provider: LLMProvider | str,\n        model: str | None = None,\n        **kwargs,\n    ) -> BaseLLMProvider:\n        \"\"\"Create LLM provider instance.\n\n        Args:\n            provider: Provider type (enum or string)\n            model: Optional model override\n            **kwargs: Additional provider-specific arguments\n\n        Returns:\n            Configured LLM provider instance\n        \"\"\"\n        if isinstance(provider, str):\n            provider = LLMProvider(provider)\n\n        provider_class = cls._providers.get(provider)\n        if not provider_class:\n            raise ValueError(f\"Unknown provider: {provider}\")\n\n        return provider_class(model=model, **kwargs)\n\n    @classmethod\n    def register(cls, provider: LLMProvider, provider_class: type[BaseLLMProvider]):\n        \"\"\"Register a new provider type.\n\n        SOLID: Open for extension - new providers can be registered\n        without modifying this class.\n        \"\"\"\n        cls._providers[provider] = provider_class\n\n\n# Convenience function for backward compatibility\ndef get_llm_provider(use_anthropic: bool = False, model: str | None = None) -> BaseLLMProvider:\n    \"\"\"Get LLM provider based on configuration.\n\n    Args:\n        use_anthropic: Use Anthropic instead of Ollama\n        model: Optional model override\n\n    Returns:\n        Configured LLM provider\n    \"\"\"\n    provider_type = LLMProvider.ANTHROPIC if use_anthropic else LLMProvider.OLLAMA\n    return LLMFactory.create(provider_type, model=model)\n"
    }
}
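
For reference, a minimal sketch of how the provider interface written by this tool call might be consumed. It assumes the sibling modules referenced by llm_interface.py (config, constants, enums) are importable; the model name and prompt below are purely illustrative, not taken from this log.

# Hypothetical usage of the interface written above (not part of the logged tool call).
from enums import LLMProvider
from llm_interface import LLMFactory, get_llm_provider

# Create a provider explicitly via the factory; "llama3.1" is an illustrative model override.
ollama = LLMFactory.create(LLMProvider.OLLAMA, model="llama3.1")
reply = ollama.chat("Summarize the migration plan in one sentence.", max_tokens=200)
if reply.success:
    print(reply.text, reply.tokens_output, reply.duration_ms)
else:
    print("LLM call failed:", reply.error)

# ...or rely on the backward-compatible helper to pick Anthropic instead of Ollama.
claude = get_llm_provider(use_anthropic=True)
print(claude.model_name)

Both paths return the same LLMResponse dataclass, so callers can switch providers without changing how they read results.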

Response

{
    "tool_response": {
        "type": "create",
        "filePath": "\/var\/www\/migration\/content-pipeline\/src\/llm_interface.py",
        "content": "\"\"\"\nLLM Provider Interface - Abstract base for LLM providers.\n\nSOLID Principles:\n- Interface Segregation: Separate interfaces for chat, embedding, vision\n- Dependency Inversion: Depend on abstractions, not concrete implementations\n- Open\/Closed: New providers can be added without modifying existing code\n\"\"\"\n\nimport os\nimport time\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Protocol, runtime_checkable\n\nimport requests\n\nfrom config import (\n    ANTHROPIC_API_KEY,\n    ANTHROPIC_MODEL,\n    OLLAMA_CHAT_MODEL,\n    OLLAMA_HOST,\n)\nfrom constants import LLM_TIMEOUT, MS_PER_SECOND, OLLAMA_TIMEOUT\nfrom enums import LLMProvider\n\n\n@dataclass\nclass LLMResponse:\n    \"\"\"Standardized LLM response.\"\"\"\n\n    text: str\n    model: str\n    provider: LLMProvider\n    tokens_input: int = 0\n    tokens_output: int = 0\n    duration_ms: int = 0\n    success: bool = True\n    error: str | None = None\n\n\n@runtime_checkable\nclass ChatProvider(Protocol):\n    \"\"\"Protocol for chat-capable LLM providers.\"\"\"\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat message and get response.\"\"\"\n        ...\n\n    @property\n    def model_name(self) -> str:\n        \"\"\"Return the model identifier.\"\"\"\n        ...\n\n\n@runtime_checkable\nclass EmbeddingProvider(Protocol):\n    \"\"\"Protocol for embedding-capable providers.\"\"\"\n\n    def embed(self, text: str) -> list[float]:\n        \"\"\"Generate embedding vector for text.\"\"\"\n        ...\n\n    @property\n    def dimension(self) -> int:\n        \"\"\"Return embedding dimension.\"\"\"\n        ...\n\n\nclass BaseLLMProvider(ABC):\n    \"\"\"Abstract base class for LLM providers.\"\"\"\n\n    @property\n    @abstractmethod\n    def provider_type(self) -> LLMProvider:\n        \"\"\"Return provider type enum.\"\"\"\n        pass\n\n    @property\n    @abstractmethod\n    def model_name(self) -> str:\n        \"\"\"Return model identifier.\"\"\"\n        pass\n\n    @abstractmethod\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat message and get response.\"\"\"\n        pass\n\n\nclass OllamaProvider(BaseLLMProvider):\n    \"\"\"Ollama LLM provider implementation.\"\"\"\n\n    def __init__(\n        self,\n        model: str | None = None,\n        host: str | None = None,\n        timeout: int = OLLAMA_TIMEOUT,\n    ):\n        self._model = model or OLLAMA_CHAT_MODEL\n        self._host = host or OLLAMA_HOST\n        self._timeout = timeout\n\n    @property\n    def provider_type(self) -> LLMProvider:\n        return LLMProvider.OLLAMA\n\n    @property\n    def model_name(self) -> str:\n        return f\"ollama:{self._model}\"\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Ollama and get response.\"\"\"\n        start_time = time.time()\n\n        try:\n            response = requests.post(\n                f\"{self._host}\/api\/generate\",\n                json={\n                    \"model\": self._model,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"options\": {\"num_predict\": max_tokens},\n                },\n                timeout=self._timeout,\n            )\n            response.raise_for_status()\n            data = response.json()\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            
return LLMResponse(\n                text=data.get(\"response\", \"\"),\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=data.get(\"prompt_eval_count\", 0),\n                tokens_output=data.get(\"eval_count\", 0),\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n    def chat_json(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Ollama with JSON format enforcement.\"\"\"\n        start_time = time.time()\n\n        try:\n            response = requests.post(\n                f\"{self._host}\/api\/generate\",\n                json={\n                    \"model\": self._model,\n                    \"prompt\": prompt,\n                    \"stream\": False,\n                    \"format\": \"json\",\n                    \"options\": {\"num_predict\": max_tokens},\n                },\n                timeout=self._timeout,\n            )\n            response.raise_for_status()\n            data = response.json()\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            return LLMResponse(\n                text=data.get(\"response\", \"\"),\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=data.get(\"prompt_eval_count\", 0),\n                tokens_output=data.get(\"eval_count\", 0),\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n\nclass AnthropicProvider(BaseLLMProvider):\n    \"\"\"Anthropic Claude LLM provider implementation.\"\"\"\n\n    def __init__(\n        self,\n        model: str | None = None,\n        api_key: str | None = None,\n        timeout: int = LLM_TIMEOUT,\n    ):\n        self._model = model or ANTHROPIC_MODEL\n        self._api_key = api_key or ANTHROPIC_API_KEY\n        self._timeout = timeout\n        self._client = None\n\n    def _get_client(self):\n        \"\"\"Lazy-load Anthropic client.\"\"\"\n        if self._client is None:\n            try:\n                import anthropic\n\n                self._client = anthropic.Anthropic(api_key=self._api_key)\n            except ImportError:\n                raise ImportError(\"anthropic package not installed\")\n        return self._client\n\n    @property\n    def provider_type(self) -> LLMProvider:\n        return LLMProvider.ANTHROPIC\n\n    @property\n    def model_name(self) -> str:\n        return self._model\n\n    def chat(self, prompt: str, max_tokens: int = 2000) -> LLMResponse:\n        \"\"\"Send chat to Anthropic and get response.\"\"\"\n        start_time = time.time()\n\n        try:\n            client = self._get_client()\n            message = client.messages.create(\n                model=self._model,\n                
max_tokens=max_tokens,\n                messages=[{\"role\": \"user\", \"content\": prompt}],\n            )\n\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n            return LLMResponse(\n                text=message.content[0].text,\n                model=self.model_name,\n                provider=self.provider_type,\n                tokens_input=message.usage.input_tokens,\n                tokens_output=message.usage.output_tokens,\n                duration_ms=duration_ms,\n            )\n\n        except Exception as e:\n            duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n            return LLMResponse(\n                text=\"\",\n                model=self.model_name,\n                provider=self.provider_type,\n                duration_ms=duration_ms,\n                success=False,\n                error=str(e),\n            )\n\n\nclass LLMFactory:\n    \"\"\"Factory for creating LLM provider instances.\n\n    SOLID: Factory pattern for creating providers without\n    coupling to concrete implementations.\n    \"\"\"\n\n    _providers: dict[LLMProvider, type[BaseLLMProvider]] = {\n        LLMProvider.OLLAMA: OllamaProvider,\n        LLMProvider.ANTHROPIC: AnthropicProvider,\n    }\n\n    @classmethod\n    def create(\n        cls,\n        provider: LLMProvider | str,\n        model: str | None = None,\n        **kwargs,\n    ) -> BaseLLMProvider:\n        \"\"\"Create LLM provider instance.\n\n        Args:\n            provider: Provider type (enum or string)\n            model: Optional model override\n            **kwargs: Additional provider-specific arguments\n\n        Returns:\n            Configured LLM provider instance\n        \"\"\"\n        if isinstance(provider, str):\n            provider = LLMProvider(provider)\n\n        provider_class = cls._providers.get(provider)\n        if not provider_class:\n            raise ValueError(f\"Unknown provider: {provider}\")\n\n        return provider_class(model=model, **kwargs)\n\n    @classmethod\n    def register(cls, provider: LLMProvider, provider_class: type[BaseLLMProvider]):\n        \"\"\"Register a new provider type.\n\n        SOLID: Open for extension - new providers can be registered\n        without modifying this class.\n        \"\"\"\n        cls._providers[provider] = provider_class\n\n\n# Convenience function for backward compatibility\ndef get_llm_provider(use_anthropic: bool = False, model: str | None = None) -> BaseLLMProvider:\n    \"\"\"Get LLM provider based on configuration.\n\n    Args:\n        use_anthropic: Use Anthropic instead of Ollama\n        model: Optional model override\n\n    Returns:\n        Configured LLM provider\n    \"\"\"\n    provider_type = LLMProvider.ANTHROPIC if use_anthropic else LLMProvider.OLLAMA\n    return LLMFactory.create(provider_type, model=model)\n",
        "structuredPatch": [],
        "originalFile": null
    }
}
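
The entry above records a PreToolUse hook event for the Write tool. Below is a minimal sketch of a hook script that inspects such an event; only the JSON fields (event, tool_name, tool_input.file_path) come from the logged payload, while the stdin input and exit-code convention are assumptions for illustration.

#!/usr/bin/env python3
# Hypothetical PreToolUse inspector: reads an event shaped like the Request above
# from stdin and flags Write calls that target paths outside the migration tree.
# The stdin/exit-code interface is an assumption, not taken from this log.
import json
import sys

ALLOWED_PREFIX = "/var/www/migration/"  # illustrative policy, not from the log

event = json.load(sys.stdin)
if event.get("event") == "PreToolUse" and event.get("tool_name") == "Write":
    file_path = event.get("tool_input", {}).get("file_path", "")
    if not file_path.startswith(ALLOWED_PREFIX):
        print(f"Blocked Write outside migration tree: {file_path}", file=sys.stderr)
        sys.exit(1)  # assumed convention: non-zero exit rejects the tool call
sys.exit(0)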