{
"tool_response": {
"type": "text",
"file": {
"filePath": "\/var\/www\/scripts\/pipeline\/generators\/content_generator.py",
"content": " - Überschriften als <h2>, <h3>, <h4>\n - Listen als <ul>\/<ol> mit <li>\"\"\"\n\n # Load generate prompt template from database\n prompt_template = get_prompt(\"content-generate\")\n\n if prompt_template:\n prompt = prompt_template.format(\n profile_text=profile_text,\n contract_text=contract_text,\n structure_text=structure_text,\n context=context_text,\n briefing=briefing,\n format_instruction=format_instruction,\n semantic_text=semantic_text,\n taxonomy_text=taxonomy_text,\n )\n else:\n # Fallback if prompt not in DB\n prompt = f\"\"\"Du bist ein professioneller Content-Autor. Erstelle basierend auf dem Briefing und dem bereitgestellten Kontext einen hochwertigen Text.\n\n{profile_text}\n{contract_text}\n{structure_text}\n{semantic_text}\n{taxonomy_text}\n\n## Kontext aus der Wissensbasis:\n{context_text}\n\n## Briefing:\n{briefing}\n\n## Anweisungen:\n1. Nutze die Informationen aus dem Kontext als Grundlage\n2. Halte dich an das Autorenprofil und den Schreibstil\n3. Beachte die Vorgaben aus dem Contract\n4. Strukturiere den Text gemäß dem Template (falls angegeben)\n5. Schreibe auf Deutsch\n6. Kennzeichne verwendete Quellen\n7. Berücksichtige die relevanten Konzepte und deren Beziehungen\n{format_instruction}\n\nErstelle nun den Content:\"\"\"\n\n return prompt\n\n\ndef call_llm(prompt: str, model: str = \"anthropic\", client_name: str = \"content-studio\") -> str:\n \"\"\"\n Call LLM to generate content with protokoll logging.\n\n Args:\n prompt: The prompt to send\n model: 'anthropic' or 'ollama'\n client_name: Identifier for protokoll logging\n\n Returns:\n Generated text content\n \"\"\"\n start_time = time.time()\n response_text = \"\"\n tokens_input = 0\n tokens_output = 0\n model_name = \"\"\n error_message = None\n status = \"completed\"\n\n try:\n if model == \"anthropic\" and ANTHROPIC_API_KEY:\n import anthropic\n\n client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n model_name = ANTHROPIC_MODEL\n\n message = client.messages.create(\n model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{\"role\": \"user\", \"content\": prompt}]\n )\n response_text = message.content[0].text\n\n # Extract token usage from Anthropic response\n if hasattr(message, \"usage\"):\n tokens_input = getattr(message.usage, \"input_tokens\", 0)\n tokens_output = getattr(message.usage, \"output_tokens\", 0)\n else:\n # Fallback to Ollama\n import requests\n\n model_name = OLLAMA_CHAT_MODEL\n\n response = requests.post(\n f\"{OLLAMA_HOST}\/api\/generate\",\n json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False},\n timeout=900, # 15 min for large models\n )\n response.raise_for_status()\n result = response.json()\n response_text = result.get(\"response\", \"\")\n\n # Extract token counts from Ollama response\n tokens_input = result.get(\"prompt_eval_count\", 0)\n tokens_output = result.get(\"eval_count\", 0)\n\n # Clean up model artifacts (Gemma, Llama, etc.)\n artifacts = [\n \"<start_of_turn>\",\n \"<\/start_of_turn>\",\n \"<end_of_turn>\",\n \"<\/end_of_turn>\",\n \"<\/s>\",\n \"<|eot_id|>\",\n \"<|im_end|>\",\n ]\n for artifact in artifacts:\n response_text = response_text.replace(artifact, \"\").strip()\n\n except Exception as e:\n status = \"error\"\n error_message = str(e)\n raise\n\n finally:\n # Calculate duration\n duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n # Log to protokoll\n db.log_to_protokoll(\n client_name=client_name,\n request=prompt,\n response=response_text if status == \"completed\" else None,\n model_name=model_name,\n 
tokens_input=tokens_input,\n tokens_output=tokens_output,\n duration_ms=duration_ms,\n status=status,\n error_message=error_message,\n )\n\n return response_text\n\n\ndef generate_content(\n order_id: int, model: str = \"anthropic\", collection: str = \"documents\", context_limit: int = 5\n) -> dict:\n \"\"\"\n Main content generation function.\n\n Args:\n order_id: Content order ID\n model: 'anthropic' or 'ollama'",
"numLines": 150,
"startLine": 150,
"totalLines": 401
}
}
}