Protokoll #4043

ID: 4043
Zeitstempel: 2025-12-22 09:04:37.000419
Client: root
IP:
Warning: Undefined array key "request_ip" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13

Deprecated: htmlspecialchars(): Passing null to parameter #1 ($string) of type string is deprecated in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 2,738 (Input: 0, Output: 0)
Dauer: 91 ms
Request-Zeit
Warning: Undefined array key "request_timestamp" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 18
Response-Zeit: -

Warning: Undefined array key "error_message" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 22

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/opt\/scripts\/pipeline\/analyze.py"
    }
}

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/opt\/scripts\/pipeline\/analyze.py",
            "content": "\"\"\"\nSemantic analysis for KI-System Pipeline\nEntity extraction, taxonomy classification, ontology mapping.\n\"\"\"\n\nimport json\nimport os\n\nimport requests\n\nfrom config import ANTHROPIC_MODEL, OLLAMA_HOST\nfrom db import db\n\n\ndef get_anthropic_client():\n    \"\"\"Get Anthropic API client.\"\"\"\n    try:\n        import anthropic\n\n        api_key = os.environ.get(\"ANTHROPIC_API_KEY\", \"\")\n        if not api_key:\n            # Try reading from credentials\n            cred_file = \"\/var\/www\/docs\/credentials\/credentials.md\"\n            if os.path.exists(cred_file):\n                with open(cred_file) as f:\n                    content = f.read()\n                    # Parse API key from markdown\n                    for line in content.split(\"\\n\"):\n                        if \"ANTHROPIC_API_KEY\" in line:\n                            parts = line.split(\"`\")\n                            if len(parts) >= 2:\n                                api_key = parts[1]\n                                break\n        if api_key:\n            return anthropic.Anthropic(api_key=api_key)\n    except ImportError:\n        pass\n    return None\n\n\ndef extract_entities_ollama(text, model=\"mistral\"):\n    \"\"\"Extract entities using Ollama.\"\"\"\n    prompt = f\"\"\"Analysiere den folgenden deutschen Text und extrahiere alle wichtigen Entitäten.\n\nKategorisiere jede Entität als:\n- PERSON (Namen von Personen)\n- ORGANIZATION (Firmen, Institutionen, Gruppen)\n- CONCEPT (Fachbegriffe, Methoden, Theorien)\n- LOCATION (Orte, Länder)\n- DATE (Zeitangaben)\n- OTHER (Sonstiges)\n\nAntworte NUR im JSON-Format:\n{{\"entities\": [{{\"name\": \"...\", \"type\": \"...\", \"context\": \"...\"}}]}}\n\nText:\n{text[:3000]}\n\"\"\"\n\n    try:\n        response = requests.post(\n            f\"{OLLAMA_HOST}\/api\/generate\",\n            json={\"model\": model, \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n            
timeout=120,\n        )\n        response.raise_for_status()\n        data = response.json()\n\n        # Parse JSON from response\n        response_text = data.get(\"response\", \"{}\")\n        try:\n            entities = json.loads(response_text)\n            return entities.get(\"entities\", [])\n        except json.JSONDecodeError:\n            db.log(\"WARNING\", \"Failed to parse entity JSON from Ollama\")\n            return []\n    except Exception as e:\n        db.log(\"ERROR\", f\"Ollama entity extraction failed: {e}\")\n        return []\n\n\ndef extract_entities_anthropic(text, client):\n    \"\"\"Extract entities using Anthropic Claude.\"\"\"\n    # Get prompt from database\n    prompt_template = db.get_prompt(\"entity_extraction\")\n\n    if not prompt_template:\n        prompt_template = \"\"\"Analysiere den folgenden deutschen Text und extrahiere alle wichtigen Entitäten.\n\nKategorisiere jede Entität als:\n- PERSON (Namen von Personen)\n- ORGANIZATION (Firmen, Institutionen, Gruppen)\n- CONCEPT (Fachbegriffe, Methoden, Theorien)\n- LOCATION (Orte, Länder)\n- DATE (Zeitangaben)\n- OTHER (Sonstiges)\n\nAntworte NUR im JSON-Format:\n{\"entities\": [{\"name\": \"...\", \"type\": \"...\", \"context\": \"kurzer Kontext der Erwähnung\"}]}\n\nText:\n{{TEXT}}\"\"\"\n\n    prompt = prompt_template.replace(\"{{TEXT}}\", text[:4000])\n\n    try:\n        message = client.messages.create(\n            model=ANTHROPIC_MODEL, max_tokens=2000, messages=[{\"role\": \"user\", \"content\": prompt}]\n        )\n\n        response_text = message.content[0].text\n\n        # Extract JSON from response\n        import re\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            entities = json.loads(json_match.group())\n            return entities.get(\"entities\", [])\n        return []\n    except Exception as e:\n        db.log(\"ERROR\", f\"Anthropic entity extraction failed: {e}\")\n        return []\n\n\ndef 
extract_relations(text, entities, client=None):\n    \"\"\"Extract relations between entities.\"\"\"\n    if not entities or len(entities) < 2:\n        return []\n\n    entity_names = [e[\"name\"] for e in entities[:20]]\n\n    prompt = f\"\"\"Analysiere den folgenden Text und identifiziere Beziehungen zwischen den genannten Entitäten.\n\nEntitäten: {\", \".join(entity_names)}\n\nBeziehungstypen:\n- DEVELOPED_BY (wurde entwickelt von)\n- RELATED_TO (steht in Beziehung zu)\n- PART_OF (ist Teil von)\n- INFLUENCED_BY (wurde beeinflusst von)\n- USED_IN (wird verwendet in)\n\nAntworte NUR im JSON-Format:\n{{\"relations\": [{{\"source\": \"...\", \"relation\": \"...\", \"target\": \"...\"}}]}}\n\nText:\n{text[:3000]}\n\"\"\"\n\n    try:\n        if client:\n            message = client.messages.create(\n                model=ANTHROPIC_MODEL, max_tokens=1000, messages=[{\"role\": \"user\", \"content\": prompt}]\n            )\n            response_text = message.content[0].text\n        else:\n            response = requests.post(\n                f\"{OLLAMA_HOST}\/api\/generate\",\n                json={\"model\": \"mistral\", \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n                timeout=120,\n            )\n            response.raise_for_status()\n            response_text = response.json().get(\"response\", \"{}\")\n\n        import re\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            data = json.loads(json_match.group())\n            return data.get(\"relations\", [])\n        return []\n    except Exception as e:\n        db.log(\"ERROR\", f\"Relation extraction failed: {e}\")\n        return []\n\n\ndef classify_taxonomy(text, client=None):\n    \"\"\"Classify text into taxonomy categories.\"\"\"\n    prompt_template = db.get_prompt(\"taxonomy_classification\")\n\n    if not prompt_template:\n        prompt_template = \"\"\"Klassifiziere den folgenden Text in passende 
Kategorien.\n\nWähle aus diesen Hauptkategorien:\n- Methoden (Therapiemethoden, Techniken)\n- Theorie (Konzepte, Modelle, Grundlagen)\n- Praxis (Anwendung, Fallbeispiele)\n- Organisation (Strukturen, Prozesse)\n- Kommunikation (Gesprächsführung, Interaktion)\n- Entwicklung (Persönliche Entwicklung, Veränderung)\n\nAntworte NUR im JSON-Format:\n{\"categories\": [\"...\", \"...\"], \"confidence\": 0.0-1.0}\n\nText:\n{{TEXT}}\"\"\"\n\n    prompt = prompt_template.replace(\"{{TEXT}}\", text[:2000])\n\n    try:\n        if client:\n            message = client.messages.create(\n                model=ANTHROPIC_MODEL, max_tokens=500, messages=[{\"role\": \"user\", \"content\": prompt}]\n            )\n            response_text = message.content[0].text\n        else:\n            response = requests.post(\n                f\"{OLLAMA_HOST}\/api\/generate\",\n                json={\"model\": \"mistral\", \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n                timeout=60,\n            )\n            response.raise_for_status()\n            response_text = response.json().get(\"response\", \"{}\")\n\n        import re\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            return json.loads(json_match.group())\n        return {\"categories\": [], \"confidence\": 0}\n    except Exception as e:\n        db.log(\"ERROR\", f\"Taxonomy classification failed: {e}\")\n        return {\"categories\": [], \"confidence\": 0}\n\n\ndef store_entities(document_id, entities):\n    \"\"\"Store extracted entities in database.\"\"\"\n    stored = 0\n\n    for entity in entities:\n        try:\n            # Check if entity already exists\n            cursor = db.execute(\n                \"SELECT id FROM entities WHERE name = %s AND type = %s\", (entity[\"name\"], entity[\"type\"])\n            )\n            existing = cursor.fetchone()\n            cursor.close()\n\n            if existing:\n                entity_id 
= existing[\"id\"]\n            else:\n                cursor = db.execute(\n                    \"\"\"INSERT INTO entities (name, type, created_at)\n                       VALUES (%s, %s, NOW())\"\"\",\n                    (entity[\"name\"], entity[\"type\"]),\n                )\n                db.commit()\n                entity_id = cursor.lastrowid\n                cursor.close()\n\n            # Link to document\n            cursor = db.execute(\n                \"\"\"INSERT IGNORE INTO document_entities\n                   (document_id, entity_id, context, created_at)\n                   VALUES (%s, %s, %s, NOW())\"\"\",\n                (document_id, entity_id, entity.get(\"context\", \"\")),\n            )\n            db.commit()\n            cursor.close()\n            stored += 1\n\n        except Exception as e:\n            db.log(\"WARNING\", f\"Failed to store entity: {e}\")\n\n    return stored\n\n\ndef analyze_document(document_id, text, use_anthropic=True):\n    \"\"\"\n    Full semantic analysis of a document.\n    Extracts entities, relations, and taxonomy classification.\n    \"\"\"\n    db.log(\"INFO\", f\"Starting semantic analysis for document {document_id}\")\n\n    client = get_anthropic_client() if use_anthropic else None\n\n    # Extract entities\n    if client:\n        entities = extract_entities_anthropic(text, client)\n    else:\n        entities = extract_entities_ollama(text)\n\n    db.log(\"INFO\", f\"Extracted {len(entities)} entities\")\n\n    # Store entities\n    if entities:\n        stored = store_entities(document_id, entities)\n        db.log(\"INFO\", f\"Stored {stored} entities\")\n\n        # Extract relations\n        relations = extract_relations(text, entities, client)\n        db.log(\"INFO\", f\"Extracted {len(relations)} relations\")\n\n        # Store relations\n        for rel in relations:\n            try:\n                # Find entity IDs\n                cursor = db.execute(\"SELECT id FROM entities WHERE 
name = %s LIMIT 1\", (rel[\"source\"],))\n                source = cursor.fetchone()\n                cursor.close()\n\n                cursor = db.execute(\"SELECT id FROM entities WHERE name = %s LIMIT 1\", (rel[\"target\"],))\n                target = cursor.fetchone()\n                cursor.close()\n\n                if source and target:\n                    cursor = db.execute(\n                        \"\"\"INSERT IGNORE INTO entity_relations\n                           (source_entity_id, target_entity_id, relation_type, created_at)\n                           VALUES (%s, %s, %s, NOW())\"\"\",\n                        (source[\"id\"], target[\"id\"], rel[\"relation\"]),\n                    )\n... [TRUNCATED-703914520a071268]",
            "numLines": 361,
            "startLine": 1,
            "totalLines": 361
        }
    }
}
← Vorheriger Zur Liste Nächster →