Log #2690

ID: 2690
Timestamp: 2025-12-22 01:03:42.453570
Client: root
IP: 145.224.96.190
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 1,501 (Input: 44, Output: 1,457)
Duration: 473 ms
Request time: 2025-12-22 01:03:42.453570
Response time: 2025-12-22 01:03:42.926802

Request

{
    "event": "PreToolUse",
    "tool_name": "Grep",
    "tool_input": {
        "pattern": "build_generation_prompt",
        "path": "\/opt\/scripts\/pipeline\/generate.py",
        "output_mode": "content",
        "-A": 80
    }
}
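
For reference, a rough Python equivalent of what this Grep request asks for: a regex search over the named file, printing each match plus 80 trailing context lines (output_mode "content", "-A": 80). This is a minimal sketch, assuming the path from the request exists and is readable; the tool's actual search backend is not shown in the log.

import re

PATH = "/opt/scripts/pipeline/generate.py"   # path taken from tool_input
PATTERN = re.compile(r"build_generation_prompt")
AFTER = 80                                   # trailing context lines ("-A": 80)

with open(PATH, encoding="utf-8") as f:
    lines = f.readlines()

for i, line in enumerate(lines):
    if PATTERN.search(line):
        # Print the hit and up to AFTER following lines, using grep-style
        # "NN:" for the match and "NN-" for context, as in the response below.
        for j in range(i, min(i + AFTER + 1, len(lines))):
            sep = ":" if j == i else "-"
            print(f"{j + 1}{sep}{lines[j]}", end="")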

Response

{
    "tool_response": {
        "mode": "content",
        "numFiles": 0,
        "filenames": [],
        "content": "93:def build_generation_prompt(briefing, context, profile, contract, structure=None):\n94-    \"\"\"Build the content generation prompt.\"\"\"\n95-\n96-    # Format context\n97-    context_text = \"\"\n98-    for i, ctx in enumerate(context, 1):\n99-        context_text += f\"\\n[Quelle {i}: {ctx['source']}]\\n{ctx['content']}\\n\"\n100-\n101-    # Build profile instructions\n102-    profile_text = \"\"\n103-    if profile:\n104-        config = profile.get(\"config\", {})\n105-        profile_text = f\"\"\"\n106-Autorenprofil: {profile.get(\"name\", \"Standard\")}\n107-- Schreibstil: {config.get(\"voice\", \"neutral\")}\n108-- Tonalität: {config.get(\"style\", \"professionell\")}\n109-- Konventionen: {config.get(\"conventions\", \"Standard-Deutsch\")}\n110-\"\"\"\n111-\n112-    # Build contract requirements\n113-    contract_text = \"\"\n114-    if contract:\n115-        config = contract.get(\"config\", {})\n116-        req = config.get(\"requirements\", {})\n117-        contract_text = f\"\"\"\n118-Contract: {contract.get(\"name\", \"Standard\")}\n119-- Wortanzahl: {req.get(\"min_words\", 500)} - {req.get(\"max_words\", 5000)} Wörter\n120-- Struktur-Validierung: {req.get(\"structure_validation\", True)}\n121-\"\"\"\n122-\n123-    # Build structure instructions\n124-    structure_text = \"\"\n125-    if structure:\n126-        config = structure.get(\"config\", {})\n127-        structure_text = f\"\"\"\n128-Struktur-Template: {structure.get(\"name\", \"\")}\n129-- Typ: {structure.get(\"type\", \"\")}\n130-- Abschnitte: {json.dumps(config.get(\"sections\", []), ensure_ascii=False)}\n131-\"\"\"\n132-\n133-    prompt = f\"\"\"Du bist ein professioneller Content-Autor. Erstelle basierend auf dem Briefing und dem bereitgestellten Kontext einen hochwertigen Text.\n134-\n135-{profile_text}\n136-{contract_text}\n137-{structure_text}\n138-\n139-## Kontext aus der Wissensbasis:\n140-{context_text}\n141-\n142-## Briefing:\n143-{briefing}\n144-\n145-## Anweisungen:\n146-1. Nutze die Informationen aus dem Kontext als Grundlage\n147-2. Halte dich an das Autorenprofil und den Schreibstil\n148-3. Beachte die Vorgaben aus dem Contract\n149-4. Strukturiere den Text gemäß dem Template (falls angegeben)\n150-5. Schreibe auf Deutsch\n151-6. 
Kennzeichne verwendete Quellen\n152-\n153-Erstelle nun den Content:\"\"\"\n154-\n155-    return prompt\n156-\n157-\n158-def call_llm(prompt, model=\"anthropic\"):\n159-    \"\"\"Call LLM to generate content.\"\"\"\n160-    if model == \"anthropic\" and ANTHROPIC_API_KEY:\n161-        import anthropic\n162-\n163-        client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)\n164-\n165-        message = client.messages.create(\n166-            model=ANTHROPIC_MODEL, max_tokens=4000, messages=[{\"role\": \"user\", \"content\": prompt}]\n167-        )\n168-        return message.content[0].text\n169-    else:\n170-        # Fallback to Ollama\n171-        import requests\n172-\n173-        response = requests.post(\n--\n278:        prompt = build_generation_prompt(order[\"briefing\"], context, profile, contract, structure)\n279-\n280-        # Generate content\n281-        content = call_llm(prompt, model)\n282-\n283-        # Get current version number\n284-        cursor = db.execute(\n285-            \"SELECT MAX(version_number) as max_v FROM content_versions WHERE order_id = %s\", (order_id,)\n286-        )\n287-        result = cursor.fetchone()\n288-        cursor.close()\n289-        version_number = (result[\"max_v\"] or 0) + 1\n290-\n291-        # Save version\n292-        version_id = save_version(order_id, content, version_number)\n293-\n294-        # Save sources\n295-        save_sources(order_id, context)\n296-\n297-        # Update status\n298-        update_order_status(order_id, \"critique\")\n299-\n300-        return {\n301-            \"success\": True,\n302-            \"order_id\": order_id,\n303-            \"version_id\": version_id,\n304-            \"version_number\": version_number,\n305-            \"content\": content,\n306-            \"sources\": [{\"source\": c[\"source\"], \"score\": c[\"score\"]} for c in context],\n307-        }\n308-\n309-    except Exception as e:\n310-        update_order_status(order_id, \"draft\")\n311-        return {\"error\": str(e)}\n312-    finally:\n313-        db.disconnect()\n314-\n315-\n316-def get_critic(critic_id):\n317-    \"\"\"Load critic from database.\"\"\"\n318-    cursor = db.execute(\n319-        \"\"\"SELECT c.*, p.content as prompt_content\n320-           FROM critics c\n321-           LEFT JOIN prompts p ON c.prompt_id = p.id\n322-           WHERE c.id = %s AND c.is_active = 1\"\"\",\n323-        (critic_id,),\n324-    )\n325-    result = cursor.fetchone()\n326-    cursor.close()\n327-    return result\n328-\n329-\n330-def run_critic(content, critic_id, model=\"anthropic\"):\n331-    \"\"\"\n332-    Run a single critic on content.\n333-\n334-    Returns:\n335-        dict with feedback and rating\n336-    \"\"\"\n337-    db.connect()\n338-\n339-    try:\n340-        critic = get_critic(critic_id)\n341-        if not critic:\n342-            return {\"error\": f\"Critic {critic_id} not found\"}\n343-\n344-        fokus = json.loads(critic[\"fokus\"]) if isinstance(critic[\"fokus\"], str) else critic[\"fokus\"]\n345-\n346-        prompt = f\"\"\"Du bist ein kritischer Lektor mit dem Fokus auf: {\", \".join(fokus)}\n347-\n348-Analysiere den folgenden Text und gib strukturiertes Feedback:\n349-\n350-## Text:\n351-{content}\n352-\n353-## Deine Aufgabe:\n354-1. Prüfe den Text auf die Aspekte: {\", \".join(fokus)}\n355-2. Identifiziere konkrete Verbesserungspunkte\n356-3. Bewerte die Qualität (1-10)\n357-\n358-Antworte im JSON-Format:",
        "numLines": 163
    }
}
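
The response captures build_generation_prompt and parts of the surrounding pipeline from generate.py. Below is a minimal usage sketch, assuming the module is importable as generate (for example with /opt/scripts/pipeline on sys.path); the briefing, context, profile, contract, and structure values are hypothetical, and only their key names follow the .get() lookups visible in the grep output.

# Hypothetical example data; only the key names come from the grepped code.
from generate import build_generation_prompt, call_llm  # assumes the module is on sys.path

briefing = "Schreibe einen Überblicksartikel über Vektordatenbanken."
context = [
    {"source": "kb/vektordatenbanken.md",
     "content": "Vektordatenbanken speichern Embeddings ...",
     "score": 0.91},
]
profile = {"name": "Fachautor",
           "config": {"voice": "sachlich", "style": "professionell",
                      "conventions": "Standard-Deutsch"}}
contract = {"name": "Blogartikel",
            "config": {"requirements": {"min_words": 800, "max_words": 1500,
                                        "structure_validation": True}}}
structure = {"name": "Standard-Artikel", "type": "artikel",
             "config": {"sections": ["Einleitung", "Hauptteil", "Fazit"]}}

prompt = build_generation_prompt(briefing, context, profile, contract, structure)
content = call_llm(prompt, model="anthropic")  # falls back to Ollama when ANTHROPIC_API_KEY is unset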