{
"event": "PreToolUse",
"tool_name": "Bash",
"tool_input": {
"command": "source \/var\/www\/dev.campus.systemische-tools.de\/.env && export DB_PASSWORD=\"$MARIADB_ROOT_PASSWORD\" && .\/venv\/bin\/python -c \"\nimport json\nimport time\nimport requests\nfrom db import db\n\nOLLAMA_HOST = 'http:\/\/localhost:11434'\nMODEL = 'gemma3:27b-it-qat'\n\ndef run_ollama(prompt, timeout=300):\n start = time.time()\n try:\n response = requests.post(\n f'{OLLAMA_HOST}\/api\/generate',\n json={\n 'model': MODEL,\n 'prompt': prompt,\n 'stream': False,\n 'format': 'json',\n 'options': {'temperature': 0.3, 'num_predict': 2000}\n },\n timeout=timeout,\n )\n response.raise_for_status()\n data = response.json()\n elapsed = time.time() - start\n return {\n 'response': data.get('response', ''),\n 'tokens': data.get('eval_count', 0),\n 'duration_ms': elapsed * 1000,\n 'success': True\n }\n except Exception as e:\n return {'response': '', 'error': str(e), 'success': False, 'duration_ms': (time.time() - start) * 1000}\n\n# Get document content\ndb.connect()\ncursor = db.execute(\n '''SELECT c.content FROM chunks c\n WHERE c.document_id = 2\n ORDER BY c.chunk_index''',\n ()\n)\nchunks = cursor.fetchall()\ncursor.close()\ndb.disconnect()\n\nfull_text = '\\n\\n'.join([c['content'] for c in chunks])\nprint(f'Dokument geladen: {len(full_text)} Zeichen, {len(chunks)} Chunks')\nprint(f'Testing: {MODEL}')\nprint('='*60)\n\n# Test 1: Entity Extraction\nprint('\\n[1\/3] Entity Extraction...')\nprompt1 = f'''Analysiere den folgenden deutschen Text und extrahiere alle wichtigen Entitäten.\n\nKategorisiere jede Entität als:\n- PERSON (Namen von Personen)\n- ORGANIZATION (Firmen, Institutionen, Gruppen)\n- CONCEPT (Fachbegriffe, Methoden, Theorien)\n- LOCATION (Orte, Länder)\n- DATE (Zeitangaben)\n- OTHER (Sonstiges)\n\nAntworte NUR im JSON-Format:\n{{\\\"entities\\\": [{{\\\"name\\\": \\\"...\\\", \\\"type\\\": \\\"...\\\", \\\"context\\\": \\\"kurze Beschreibung\\\"}}]}}\n\nText:\n{full_text[:3500]}'''\n\nresult1 = run_ollama(prompt1)\nprint(f' Duration: {result1[\\\"duration_ms\\\"]:.0f}ms')\nprint(f' Tokens: {result1.get(\\\"tokens\\\", \\\"?\\\")}')\nif result1['success']:\n try:\n import re\n json_match = re.search(r'\\{[\\s\\S]*\\}', result1['response'])\n if json_match:\n data = json.loads(json_match.group())\n entities = data.get('entities', [])\n print(f' Entities found: {len(entities)}')\n for e in entities[:7]:\n print(f' • {e.get(\\\"name\\\", \\\"?\\\")} ({e.get(\\\"type\\\", \\\"?\\\")})')\n else:\n print(' JSON parsing failed - no JSON found')\n print(f' Raw: {result1[\\\"response\\\"][:200]}...')\n except Exception as ex:\n print(f' JSON parsing error: {ex}')\n print(f' Raw: {result1[\\\"response\\\"][:200]}...')\nelse:\n print(f' Error: {result1.get(\\\"error\\\")}')\n\"\n",
"timeout": 360000,
"description": "Run entity extraction test with gemma3:27b"
}
}
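
The inline script above is hard to read through two layers of shell and JSON escaping. Unpacked from the one-liner, its core helper looks like the sketch below; host, model, and options are copied straight from the logged command, the comments are added annotations, and nothing here is specific to the campus project.

import time

import requests

OLLAMA_HOST = 'http://localhost:11434'
MODEL = 'gemma3:27b-it-qat'

def run_ollama(prompt, timeout=300):
    """POST a non-streaming generate request to Ollama and time it end to end."""
    start = time.time()
    try:
        response = requests.post(
            f'{OLLAMA_HOST}/api/generate',
            json={
                'model': MODEL,
                'prompt': prompt,
                'stream': False,   # one response body instead of NDJSON chunks
                'format': 'json',  # ask Ollama to constrain decoding to valid JSON
                'options': {'temperature': 0.3, 'num_predict': 2000},
            },
            timeout=timeout,
        )
        response.raise_for_status()
        data = response.json()
        return {
            'response': data.get('response', ''),
            'tokens': data.get('eval_count', 0),  # number of generated tokens
            'duration_ms': (time.time() - start) * 1000,
            'success': True,
        }
    except Exception as e:  # connection errors, timeouts, HTTP errors, bad JSON
        return {'response': '', 'error': str(e), 'success': False,
                'duration_ms': (time.time() - start) * 1000}

Because 'format': 'json' already constrains the model's output, the regex fallback later in the script mostly serves as cheap insurance rather than the primary parser.
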
{
"tool_response": {
"stdout": "Dokument geladen: 9890 Zeichen, 12 Chunks\nTesting: gemma3:27b-it-qat\n============================================================\n\n[1\/3] Entity Extraction...\n Duration: 28920ms\n Tokens: 318\n Entities found: 10\n • Auftragsklärung (CONCEPT)\n • Teambuilding (CONCEPT)\n • Prozessbegleitung (CONCEPT)\n • Teamentwicklung (CONCEPT)\n • Teamcoaching (CONCEPT)\n • Moderator*in (PERSON)\n • Konfliktklärung (CONCEPT)",
"stderr": "",
"interrupted": false,
"isImage": false
}
}
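
The run completed cleanly: 318 generated tokens in roughly 29 s works out to about 11 tokens/s end to end, and since that wall time also includes prompt evaluation over ~3,500 characters of input, pure decode speed is somewhat higher. The script's regex fallback for pulling the JSON object out of the response is worth keeping as a standalone utility; a minimal sketch, mirroring the logged code:

import json
import re

def extract_json(text):
    """Return the first {...} object embedded in a model response, or None.

    Mirrors the fallback in the logged script: a greedy first-brace-to-
    last-brace match strips any stray prose around the payload before
    handing it to json.loads.
    """
    match = re.search(r'\{[\s\S]*\}', text)
    if match is None:
        return None
    try:
        return json.loads(match.group())
    except json.JSONDecodeError:
        return None

# Usage against a result dict shaped like the logged run:
# entities = (extract_json(result1['response']) or {}).get('entities', [])
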