Protokoll #17458

ID: 17458
Zeitstempel: 2025-12-26 14:48:14.773740
Client: root
IP:
Warning: Undefined array key "request_ip" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13

Deprecated: htmlspecialchars(): Passing null to parameter #1 ($string) of type string is deprecated in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 1,323 (Input: 0, Output: 0)
Dauer: 90 ms
Request-Zeit:
Warning: Undefined array key "request_timestamp" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 18
Response-Zeit: -

Warning: Undefined array key "error_message" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 22

Request

{
    "event": "PreToolUse",
    "tool_name": "Read",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/semantic_chunk_analyzer.py"
    }
}

Response

{
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "\/var\/www\/scripts\/pipeline\/analyzers\/semantic_analyzer.py",
            "content": "\"\"\"\nSemantic Analyzer - Analyze chunks for summary, keywords, sentiment, topics.\n\"\"\"\n\nimport json\nimport re\nimport sys\nimport time\n\nimport requests\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST\nfrom db import db\nfrom protokoll import protokoll\n\n\ndef analyze_chunk_semantics(chunk_id: int, content: str, client=None) -> dict | None:\n    \"\"\"\n    Analyze a single chunk for semantics (summary, keywords, sentiment, topics).\n    Stores result in chunk_semantics table.\n    \"\"\"\n    prompt_template = db.get_prompt(\"chunk_semantics\")\n\n    if not prompt_template:\n        prompt_template = \"\"\"Analysiere diesen Textabschnitt und extrahiere:\n\n1. **summary**: Eine kurze Zusammenfassung (1-2 Sätze)\n2. **keywords**: 3-5 wichtige Schlüsselwörter\n3. **sentiment**: Stimmung (positive, negative, neutral, mixed)\n4. **topics**: 2-3 Hauptthemen\n\nAntworte NUR im JSON-Format:\n{\"summary\": \"...\", \"keywords\": [\"...\", \"...\"], \"sentiment\": \"neutral\", \"topics\": [\"...\", \"...\"]}\n\nText:\n{{TEXT}}\"\"\"\n\n    prompt = prompt_template.replace(\"{{TEXT}}\", content[:2000])\n\n    try:\n        start_time = time.time()\n        tokens_in, tokens_out = 0, 0\n        model_name = \"\"\n\n        if client:\n            message = client.messages.create(\n                model=ANTHROPIC_MODEL, max_tokens=500, messages=[{\"role\": \"user\", \"content\": prompt}]\n            )\n            response_text = message.content[0].text\n            tokens_in = message.usage.input_tokens\n            tokens_out = message.usage.output_tokens\n            model_name = ANTHROPIC_MODEL\n        else:\n            response = requests.post(\n                f\"{OLLAMA_HOST}\/api\/generate\",\n                json={\"model\": OLLAMA_CHAT_MODEL, \"prompt\": prompt, \"stream\": False, \"format\": \"json\"},\n                timeout=60,\n            )\n    
        response.raise_for_status()\n            data = response.json()\n            response_text = data.get(\"response\", \"{}\")\n            tokens_in = data.get(\"prompt_eval_count\", 0)\n            tokens_out = data.get(\"eval_count\", 0)\n            model_name = f\"ollama:{OLLAMA_CHAT_MODEL}\"\n\n        duration_ms = int((time.time() - start_time) * 1000)\n\n        protokoll.log_llm_call(\n            request=f\"[chunk_semantics] chunk_id={chunk_id}\",\n            response=response_text[:1000],\n            model_name=model_name,\n            tokens_input=tokens_in,\n            tokens_output=tokens_out,\n            duration_ms=duration_ms,\n            status=\"completed\",\n        )\n\n        json_match = re.search(r\"\\{[\\s\\S]*\\}\", response_text)\n        if json_match:\n            result = json.loads(json_match.group())\n\n            cursor = db.execute(\n                \"\"\"INSERT INTO chunk_semantics\n                   (chunk_id, summary, keywords, sentiment, topics, language, analyzed_at, analysis_model)\n                   VALUES (%s, %s, %s, %s, %s, 'de', NOW(), %s)\n                   ON DUPLICATE KEY UPDATE\n                   summary = VALUES(summary), keywords = VALUES(keywords),\n                   sentiment = VALUES(sentiment), topics = VALUES(topics),\n                   analyzed_at = NOW(), analysis_model = VALUES(analysis_model)\"\"\",\n                (\n                    chunk_id,\n                    result.get(\"summary\", \"\"),\n                    json.dumps(result.get(\"keywords\", []), ensure_ascii=False),\n                    result.get(\"sentiment\", \"neutral\"),\n                    json.dumps(result.get(\"topics\", []), ensure_ascii=False),\n                    model_name,\n                ),\n            )\n            db.commit()\n            cursor.close()\n            return result\n\n        return None\n\n    except Exception as e:\n        db.log(\"ERROR\", f\"Chunk semantics analysis failed: {e}\")\n 
       return None\n\n\ndef analyze_chunks_semantics(document_id: int, client=None, progress=None) -> int:\n    \"\"\"Analyze all chunks of a document for semantics.\"\"\"\n    cursor = db.execute(\"SELECT id, content FROM chunks WHERE document_id = %s ORDER BY chunk_index\", (document_id,))\n    chunks = cursor.fetchall()\n    cursor.close()\n\n    total = len(chunks)\n    db.log(\"INFO\", f\"Analyzing semantics for {total} chunks\")\n    if progress:\n        progress.add_log(f\"Semantik: Analysiere {total} Chunks...\")\n\n    analyzed = 0\n    for i, chunk in enumerate(chunks, 1):\n        if progress:\n            progress.add_log(f\"Semantik: Chunk {i}\/{total}...\")\n\n        result = analyze_chunk_semantics(chunk[\"id\"], chunk[\"content\"], client)\n        if result:\n            analyzed += 1\n\n    db.log(\"INFO\", f\"Analyzed {analyzed}\/{total} chunks for semantics\")\n    if progress:\n        progress.add_log(f\"Semantik: {analyzed}\/{total} Chunks analysiert\")\n    return analyzed\n",
            "numLines": 136,
            "startLine": 1,
            "totalLines": 136
        }
    }
}
← Vorheriger Zur Liste Nächster →