{
  "event": "PreToolUse",
  "tool_name": "Edit",
  "tool_input": {
    "file_path": "\/var\/www\/scripts\/pipeline\/analyzers\/relation_extractor.py",
    "old_string": " duration_ms = int((time.time() - start_time) * 1000)\n\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\",",
    "new_string": " duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\","
  }
}
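
For context, the PreToolUse object above is the kind of payload a pre-tool hook script could inspect before the Edit is applied. The sketch below is a hypothetical consumer, not part of the pipeline shown here: it assumes the hook runner delivers one such JSON object on standard input and treats a non-zero exit code as a veto, neither of which is shown in this log; only the field names come from the payload above.

# Hypothetical PreToolUse hook consumer (assumed stdin delivery, assumed
# exit-code semantics); only the field names come from the logged payload.
import json
import sys

event = json.load(sys.stdin)

if event.get("tool_name") == "Edit":
    new_string = event.get("tool_input", {}).get("new_string", "")
    # Flag edits that would (re)introduce the magic number this patch removes.
    if "* 1000)" in new_string:
        print("Edit still hard-codes 1000; use MS_PER_SECOND", file=sys.stderr)
        sys.exit(2)  # assumed to block the tool call in the hook runner

sys.exit(0)
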
{
  "tool_response": {
    "filePath": "\/var\/www\/scripts\/pipeline\/analyzers\/relation_extractor.py",
    "oldString": " duration_ms = int((time.time() - start_time) * 1000)\n\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\",",
    "newString": " duration_ms = int((time.time() - start_time) * MS_PER_SECOND)\n\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\",",
    "originalFile": "\"\"\"\nRelation Extraction - Extract relations between entities.\n\"\"\"\n\nimport json\nimport re\nimport sys\nimport time\n\nimport requests\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST\nfrom constants import BATCH_LIMIT, LLM_TIMEOUT, MS_PER_SECOND\nfrom db import db\nfrom protokoll import protokoll\n\n\ndef extract_relations(text: str, entities: list[dict], client=None) -> list[dict]:\n \"\"\"Extract relations between entities.\"\"\"\n if not entities or len(entities) < 2:\n return []\n\n entity_names = [e[\"name\"] for e in entities[:20]]\n\n prompt_template = db.get_prompt(\"relation_extraction\")\n\n if not prompt_template:\n db.log(\"WARNING\", \"relation_extraction prompt not found in DB, using fallback\")\n prompt_template = \"\"\"Identifiziere Beziehungen zwischen Entitäten.\nEntitäten: {{ENTITIES}}\nBeziehungstypen: DEVELOPED_BY, RELATED_TO, PART_OF, USED_IN, BASED_ON\nAntworte NUR im JSON-Format:\n{\"relations\": [{\"source\": \"...\", \"relation\": \"...\", \"target\": \"...\"}]}\n\nText:\n{{TEXT}}\"\"\"\n\n prompt = prompt_template.replace(\"{{ENTITIES}}\", \", \".join(entity_names))\n prompt = prompt.replace(\"{{TEXT}}\", text[:3000])\n\n try:\n start_time = time.time()\n tokens_in, tokens_out = 0, 0\n model_name = \"\"\n\n if client:\n message = client.messages.create(\n model=ANTHROPIC_MODEL, max_tokens=BATCH_LIMIT, messages=[{\"role\": \"user\", \"content\": prompt}]\n )\n response_text = message.content[0].text\n tokens_in = message.usage.input_tokens\n tokens_out = message.usage.output_tokens\n model_name = ANTHROPIC_MODEL\n else:\n response = requests.post(\n f\"{OLLAMA_HOST}\/api\/generate\",\n json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n timeout=LLM_TIMEOUT,\n )\n response.raise_for_status()\n data = response.json()\n response_text = data.get(\"response\", \"{}\")\n tokens_in = data.get(\"prompt_eval_count\", 0)\n tokens_out = data.get(\"eval_count\", 0)\n model_name = f\"ollama:{OLLAMA_CHAT_MODEL}\"\n\n duration_ms = int((time.time() - start_time) * 1000)\n\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\",\n response=response_text[:2000],\n model_name=model_name,\n tokens_input=tokens_in,\n tokens_output=tokens_out,\n duration_ms=duration_ms,\n status=\"completed\",\n )\n\n json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n if json_match:\n data = json.loads(json_match.group())\n return data.get(\"relations\", [])\n return []\n except Exception as e:\n db.log(\"ERROR\", f\"Relation extraction failed: {e}\")\n protokoll.log_llm_call(\n request=f\"[relation_extraction] {prompt[:500]}...\",\n model_name=ANTHROPIC_MODEL if client else f\"ollama:{OLLAMA_CHAT_MODEL}\",\n status=\"error\",\n error_message=str(e),\n )\n return []\n",
    "structuredPatch": [
      {
        "oldStart": 66,
        "oldLines": 7,
        "newStart": 66,
        "newLines": 7,
        "lines": [
          " tokens_out = data.get(\"eval_count\", 0)",
          " model_name = f\"ollama:{OLLAMA_CHAT_MODEL}\"",
          " ",
          "- duration_ms = int((time.time() - start_time) * 1000)",
          "+ duration_ms = int((time.time() - start_time) * MS_PER_SECOND)",
          " ",
          " protokoll.log_llm_call(",
          " request=f\"[relation_extraction] {prompt[:500]}...\","
        ]
      }
    ],
    "userModified": false,
    "replaceAll": false
  }
}
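
The structuredPatch above records a single hunk: the millisecond conversion in relation_extractor.py now uses MS_PER_SECOND, which the original file imports from the pipeline's constants module, instead of the literal 1000. A minimal sketch of that timing pattern follows; the constant's value of 1000 is an assumption here, since the log shows the import and the replaced literal but not the definition in constants.py.

# Minimal sketch of the elapsed-time logging pattern touched by the patch.
# MS_PER_SECOND = 1000 is assumed; in the pipeline it lives in constants.py.
import time

MS_PER_SECOND = 1000

start_time = time.time()
# ... the LLM call (Anthropic or Ollama) would run here ...
duration_ms = int((time.time() - start_time) * MS_PER_SECOND)
print(f"duration_ms={duration_ms}")
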