Protokoll #11616
| ID | 11616 |
|---|---|
| Timestamp | 2025-12-23 20:26:38.798373 |
| Client | root |
| IP | - |
| Model | claude-sonnet-4-20250514 |
| Status | completed |
| Tokens | 1,325 (Input: 0, Output: 0) |
| Duration | 76 ms |
| Request time | - |
| Response time | - |
Request

{
  "event": "PreToolUse",
  "tool_name": "Read",
  "tool_input": {
    "file_path": "/opt/scripts/pipeline/generate.py",
    "offset": 350,
    "limit": 150
  }
}
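The Read call asks for 150 lines of generate.py starting at line 350, which matches the startLine/numLines fields in the response below. A minimal sketch of that slicing behaviour, assuming `offset` is the 1-based first line and `limit` the maximum number of lines returned (the helper name is hypothetical, not part of the tool):

```python
from pathlib import Path

def read_slice(file_path: str, offset: int = 1, limit: int = 2000) -> dict:
    """Hypothetical re-implementation of the slice semantics implied by the
    request/response pair: 1-based `offset`, at most `limit` lines."""
    lines = Path(file_path).read_text(encoding="utf-8").splitlines(keepends=True)
    chunk = lines[offset - 1 : offset - 1 + limit]
    return {
        "content": "".join(chunk),
        "numLines": len(chunk),    # 150 in the response below
        "startLine": offset,       # 350
        "totalLines": len(lines),  # 968
    }

# read_slice("/opt/scripts/pipeline/generate.py", offset=350, limit=150)
```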
Response

{
  "tool_response": {
    "type": "text",
    "file": {
      "filePath": "/opt/scripts/pipeline/generate.py",
      "numLines": 150,
      "startLine": 350,
      "totalLines": 968
    }
  }
}

The "content" field is the requested slice of generate.py (lines 350-499). It opens on the closing quotes of a docstring, partway through the prompt-builder function:

```python
"""

    # Build contract requirements
    contract_text = ""
    if contract:
        config = contract.get("config", {})
        req = config.get("requirements", {})
        contract_text = f"""
Contract: {contract.get("name", "Standard")}
- Wortanzahl: {req.get("min_words", 500)} - {req.get("max_words", 5000)} Wörter
"""

    # Build structure instructions - detect new vs old format
    structure_text = ""
    output_format = "markdown"
    erlaubte_tags = []

    if structure:
        config = structure.get("config", {})

        # Detect new format (has "ausgabe" at top level)
        if "ausgabe" in config or "gesamtaufbau" in config:
            # New Blog-Struktur format
            parsed_text, output_format, erlaubte_tags = _parse_new_structure(config)
            structure_text = f"""
## Struktur: {structure.get("name", "")}

{parsed_text}
"""
        else:
            # Old format
            structure_text = f"""
Struktur-Template: {structure.get("name", "")}
- Abschnitte: {json.dumps(config.get("sections", []), ensure_ascii=False)}
"""

    # Build format instruction based on structure's ausgabe
    format_instruction = ""
    if output_format == "body-html":
        tags_str = ", ".join(erlaubte_tags) if erlaubte_tags else "h1, h2, h3, h4, p, a, ol, ul, li, strong, table, hr"
        format_instruction = f"""7. **KRITISCH - Ausgabe als sauberes HTML:**
   - NUR diese Tags: {tags_str}
   - KEIN Markdown (keine ##, keine **, keine -)
   - KEIN div, span, br, img, script, style
   - Jeder Absatz in <p>-Tags
   - Überschriften als <h2>, <h3>, <h4>
   - Listen als <ul>/<ol> mit <li>"""

    # Load generate prompt template from database
    # Note: Assumes db.connect() was called by the caller (generate_content)
    prompt_template = get_prompt("content-generate")

    if prompt_template:
        prompt = prompt_template.format(
            profile_text=profile_text,
            contract_text=contract_text,
            structure_text=structure_text,
            context=context_text,
            briefing=briefing,
            format_instruction=format_instruction,
        )
    else:
        # Fallback if prompt not in DB
        prompt = f"""Du bist ein professioneller Content-Autor. Erstelle basierend auf dem Briefing und dem bereitgestellten Kontext einen hochwertigen Text.

{profile_text}
{contract_text}
{structure_text}

## Kontext aus der Wissensbasis:
{context_text}

## Briefing:
{briefing}

## Anweisungen:
1. Nutze die Informationen aus dem Kontext als Grundlage
2. Halte dich an das Autorenprofil und den Schreibstil
3. Beachte die Vorgaben aus dem Contract
4. Strukturiere den Text gemäß dem Template (falls angegeben)
5. Schreibe auf Deutsch
6. Kennzeichne verwendete Quellen
{format_instruction}

Erstelle nun den Content:"""

    return prompt
```
The slice continues with the LLM dispatch helper:

```python
def call_llm(prompt, model="anthropic", client_name="content-studio"):
    """
    Call LLM to generate content with protokoll logging.

    Args:
        prompt: The prompt to send
        model: 'anthropic' or 'ollama'
        client_name: Identifier for protokoll logging

    Returns:
        Generated text content
    """
    import time

    start_time = time.time()
    response_text = ""
    tokens_input = 0
    tokens_output = 0
    model_name = ""
    error_message = None
    status = "completed"

    try:
        if model == "anthropic" and ANTHROPIC_API_KEY:
            import anthropic

            client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
            model_name = ANTHROPIC_MODEL

            message = client.messages.create(
                model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{"role": "user", "content": prompt}]
            )
            response_text = message.content[0].text

            # Extract token usage from Anthropic response
            if hasattr(message, "usage"):
                tokens_input = getattr(message.usage, "input_tokens", 0)
                tokens_output = getattr(message.usage, "output_tokens", 0)
        else:
            # Fallback to Ollama
            import requests

            model_name = OLLAMA_CHAT_MODEL

            response = requests.post(
                f"{OLLAMA_HOST}/api/generate",
                json={"model": OLLAMA_CHAT_MODEL, "prompt": prompt, "stream": False},
                timeout=300,
            )
            response.raise_for_status()
            result = response.json()
            response_text = result.get("response", "")

            # Extract token counts from Ollama response
            tokens_input = result.get("prompt_eval_count", 0)
            tokens_output = result.get("eval_count", 0)

    except Exception as e:
        status = "error"
        error_message = str(e)
        raise
```

The slice ends here, mid-function at line 499; generate.py continues past the `raise` (968 lines in total).
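Not shown is the code that persists these values, so the link between `call_llm` and entries like this page can only be inferred. A sketch of what the logged row plausibly contains, mapping the local variables above onto the fields in the table at the top (the function and its field names are assumptions):

```python
import time

def build_protokoll_row(client_name: str, model_name: str, status: str,
                        tokens_input: int, tokens_output: int,
                        start_time: float, error_message: str | None = None) -> dict:
    """Assumed shape of a protokoll entry; field names mirror the table on
    this page, not code from generate.py."""
    return {
        "client": client_name,                                  # "root"
        "model": model_name,                                    # "claude-sonnet-4-20250514"
        "status": status,                                       # "completed" / "error"
        "tokens_input": tokens_input,
        "tokens_output": tokens_output,
        "duration_ms": int((time.time() - start_time) * 1000),  # "76 ms"
        "error_message": error_message,
    }
```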