Protokoll #15146

ID: 15146
Zeitstempel: 2025-12-25 14:00:00.343463
Client: root
IP
Warning: Undefined array key "request_ip" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13

Deprecated: htmlspecialchars(): Passing null to parameter #1 ($string) of type string is deprecated in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 1,731 (Input: 0, Output: 0)
Dauer: 127 ms
Request-Zeit
Warning: Undefined array key "request_timestamp" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 18
Response-Zeit: -

Warning: Undefined array key "error_message" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 22

Request

{
    "event": "PreToolUse",
    "tool_name": "Write",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/analyzers\/relation_extractor.py",
        "content": "\"\"\"\nRelation Extraction - Extract relations between entities.\n\"\"\"\n\nimport json\nimport re\nimport sys\nimport time\n\nimport requests\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST\nfrom db import db\nfrom protokoll import protokoll\n\n\ndef extract_relations(text: str, entities: list[dict], client=None) -> list[dict]:\n    \"\"\"Extract relations between entities.\"\"\"\n    if not entities or len(entities) < 2:\n        return []\n\n    entity_names = [e[\"name\"] for e in entities[:20]]\n\n    prompt_template = db.get_prompt(\"relation_extraction\")\n\n    if not prompt_template:\n        db.log(\"WARNING\", \"relation_extraction prompt not found in DB, using fallback\")\n        prompt_template = \"\"\"Identifiziere Beziehungen zwischen Entitäten.\nEntitäten: {{ENTITIES}}\nBeziehungstypen: DEVELOPED_BY, RELATED_TO, PART_OF, USED_IN, BASED_ON\nAntworte NUR im JSON-Format:\n{\"relations\": [{\"source\": \"...\", \"relation\": \"...\", \"target\": \"...\"}]}\n\nText:\n{{TEXT}}\"\"\"\n\n    prompt = prompt_template.replace(\"{{ENTITIES}}\", \", \".join(entity_names))\n    prompt = prompt.replace(\"{{TEXT}}\", text[:3000])\n\n    try:\n        start_time = time.time()\n        tokens_in, tokens_out = 0, 0\n        model_name = \"\"\n\n        if client:\n            message = client.messages.create(\n                model=ANTHROPIC_MODEL, max_tokens=1000, messages=[{\"role\": \"user\", \"content\": prompt}]\n            )\n            response_text = message.content[0].text\n            tokens_in = message.usage.input_tokens\n            tokens_out = message.usage.output_tokens\n            model_name = ANTHROPIC_MODEL\n        else:\n            response = requests.post(\n                f\"{OLLAMA_HOST}\/api\/generate\",\n                json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n                
timeout=120,\n            )\n            response.raise_for_status()\n            data = response.json()\n            response_text = data.get(\"response\", \"{}\")\n            tokens_in = data.get(\"prompt_eval_count\", 0)\n            tokens_out = data.get(\"eval_count\", 0)\n            model_name = f\"ollama:{OLLAMA_CHAT_MODEL}\"\n\n        duration_ms = int((time.time() - start_time) * 1000)\n\n        protokoll.log_llm_call(\n            request=f\"[relation_extraction] {prompt[:500]}...\",\n            response=response_text[:2000],\n            model_name=model_name,\n            tokens_input=tokens_in,\n            tokens_output=tokens_out,\n            duration_ms=duration_ms,\n            status=\"completed\",\n        )\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            data = json.loads(json_match.group())\n            return data.get(\"relations\", [])\n        return []\n    except Exception as e:\n        db.log(\"ERROR\", f\"Relation extraction failed: {e}\")\n        protokoll.log_llm_call(\n            request=f\"[relation_extraction] {prompt[:500]}...\",\n            model_name=ANTHROPIC_MODEL if client else f\"ollama:{OLLAMA_CHAT_MODEL}\",\n            status=\"error\",\n            error_message=str(e),\n        )\n        return []\n"
    }
}

Response

{
    "tool_response": {
        "type": "create",
        "filePath": "\/var\/www\/scripts\/pipeline\/analyzers\/relation_extractor.py",
        "content": "\"\"\"\nRelation Extraction - Extract relations between entities.\n\"\"\"\n\nimport json\nimport re\nimport sys\nimport time\n\nimport requests\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST\nfrom db import db\nfrom protokoll import protokoll\n\n\ndef extract_relations(text: str, entities: list[dict], client=None) -> list[dict]:\n    \"\"\"Extract relations between entities.\"\"\"\n    if not entities or len(entities) < 2:\n        return []\n\n    entity_names = [e[\"name\"] for e in entities[:20]]\n\n    prompt_template = db.get_prompt(\"relation_extraction\")\n\n    if not prompt_template:\n        db.log(\"WARNING\", \"relation_extraction prompt not found in DB, using fallback\")\n        prompt_template = \"\"\"Identifiziere Beziehungen zwischen Entitäten.\nEntitäten: {{ENTITIES}}\nBeziehungstypen: DEVELOPED_BY, RELATED_TO, PART_OF, USED_IN, BASED_ON\nAntworte NUR im JSON-Format:\n{\"relations\": [{\"source\": \"...\", \"relation\": \"...\", \"target\": \"...\"}]}\n\nText:\n{{TEXT}}\"\"\"\n\n    prompt = prompt_template.replace(\"{{ENTITIES}}\", \", \".join(entity_names))\n    prompt = prompt.replace(\"{{TEXT}}\", text[:3000])\n\n    try:\n        start_time = time.time()\n        tokens_in, tokens_out = 0, 0\n        model_name = \"\"\n\n        if client:\n            message = client.messages.create(\n                model=ANTHROPIC_MODEL, max_tokens=1000, messages=[{\"role\": \"user\", \"content\": prompt}]\n            )\n            response_text = message.content[0].text\n            tokens_in = message.usage.input_tokens\n            tokens_out = message.usage.output_tokens\n            model_name = ANTHROPIC_MODEL\n        else:\n            response = requests.post(\n                f\"{OLLAMA_HOST}\/api\/generate\",\n                json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n                
timeout=120,\n            )\n            response.raise_for_status()\n            data = response.json()\n            response_text = data.get(\"response\", \"{}\")\n            tokens_in = data.get(\"prompt_eval_count\", 0)\n            tokens_out = data.get(\"eval_count\", 0)\n            model_name = f\"ollama:{OLLAMA_CHAT_MODEL}\"\n\n        duration_ms = int((time.time() - start_time) * 1000)\n\n        protokoll.log_llm_call(\n            request=f\"[relation_extraction] {prompt[:500]}...\",\n            response=response_text[:2000],\n            model_name=model_name,\n            tokens_input=tokens_in,\n            tokens_output=tokens_out,\n            duration_ms=duration_ms,\n            status=\"completed\",\n        )\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            data = json.loads(json_match.group())\n            return data.get(\"relations\", [])\n        return []\n    except Exception as e:\n        db.log(\"ERROR\", f\"Relation extraction failed: {e}\")\n        protokoll.log_llm_call(\n            request=f\"[relation_extraction] {prompt[:500]}...\",\n            model_name=ANTHROPIC_MODEL if client else f\"ollama:{OLLAMA_CHAT_MODEL}\",\n            status=\"error\",\n            error_message=str(e),\n        )\n        return []\n",
        "structuredPatch": [],
        "originalFile": null
    }
}
← Vorheriger Zur Liste Nächster →