Protocol #6609

ID: 6609
Timestamp: 2025-12-23 06:34:01.138822
Client: root
IP: -
Model: claude-sonnet-4-20250514
Status: completed
Tokens: 2,972 (Input: 0, Output: 0)
Duration: 96 ms
Request time: -
Response time: -

Request

{
    "event": "PreToolUse",
    "tool_name": "Edit",
    "tool_input": {
        "file_path": "\/opt\/scripts\/pipeline\/pipeline.py",
        "old_string": "            \"detect_images\": True,\n            \"detect_charts\": True,\n            \"detect_tables\": True,\n            \"max_pages\": VISION_MAX_PAGES,\n        }",
        "new_string": "            \"detect_images\": True,\n            \"detect_charts\": True,\n            \"detect_tables\": True,\n        }"
    }
}
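
The tool_input above is a plain search-and-replace: old_string is located in file_path and swapped for new_string (here, dropping the "max_pages": VISION_MAX_PAGES entry from vision_config). A minimal sketch of applying such a payload, assuming UTF-8 files and an exactly-once match rule; apply_edit is a hypothetical helper, not the Edit tool's actual implementation:

from pathlib import Path

def apply_edit(tool_input: dict) -> None:
    """Replace old_string with new_string in file_path, exactly once."""
    path = Path(tool_input["file_path"])
    text = path.read_text(encoding="utf-8")
    old = tool_input["old_string"]
    # Insist on a unique match so the edit cannot silently land elsewhere.
    count = text.count(old)
    if count != 1:
        raise ValueError(f"old_string matched {count} times; expected exactly 1")
    path.write_text(text.replace(old, tool_input["new_string"], 1), encoding="utf-8")

Requiring a unique match is the conservative choice for a multi-line replacement like this one: with replaceAll false (see the Response below), an ambiguous old_string should fail loudly rather than patch the wrong occurrence.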

Response

{
    "tool_response": {
        "filePath": "\/opt\/scripts\/pipeline\/pipeline.py",
        "oldString": "            \"detect_images\": True,\n            \"detect_charts\": True,\n            \"detect_tables\": True,\n            \"max_pages\": VISION_MAX_PAGES,\n        }",
        "newString": "            \"detect_images\": True,\n            \"detect_charts\": True,\n            \"detect_tables\": True,\n        }",
        "originalFile": "#!\/usr\/bin\/env python3\n\"\"\"\nKI-System Document Pipeline\nMain orchestration script for document processing.\n\nUsage:\n    python pipeline.py scan      # Scan for new documents\n    python pipeline.py process   # Process queued documents\n    python pipeline.py embed     # Embed pending chunks\n    python pipeline.py all       # Full pipeline run\n    python pipeline.py all --pipeline-id=1 --run-id=5  # With tracking\n    python pipeline.py file <path>  # Process single file\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport time\nfrom pathlib import Path\n\nfrom analyze import analyze_document\nfrom chunk import chunk_by_structure\nfrom config import MAX_RETRIES, RETRY_BACKOFF_BASE\nfrom db import PipelineProgress, db\nfrom detect import queue_files, scan_directory\nfrom embed import embed_chunks, embed_pending_chunks\nfrom enrich import run_enrichment_step\nfrom extract import extract, get_full_text\nfrom vision import run_vision_step\n\n\ndef process_file(file_path, progress=None):\n    \"\"\"Process a single file through the pipeline.\"\"\"\n    db.log(\"INFO\", f\"Processing: {file_path}\")\n    file_name = Path(file_path).name\n\n    if progress:\n        progress.update_document(file_name)\n\n    # Extract text\n    if progress:\n        progress.update_step(\"extract\")\n        progress.add_log(f\"Extrahiere Text: {file_name}\")\n\n    extraction = extract(file_path)\n    if not extraction[\"success\"]:\n        db.log(\"ERROR\", f\"Extraction failed: {extraction.get('error')}\")\n        if progress:\n            progress.add_log(\"FEHLER: Extraktion fehlgeschlagen\")\n        return False, 0, 0\n\n    # Get page count for PDFs\n    total_pages = 0\n    if extraction[\"file_type\"] == \".pdf\" and isinstance(extraction.get(\"content\"), list):\n        total_pages = len(extraction[\"content\"])\n        if progress:\n            progress.add_log(f\"{total_pages} Seiten extrahiert\")\n\n    # Get document info\n    file_stat = os.stat(file_path)\n\n    import hashlib\n\n    with open(file_path, \"rb\") as f:\n        file_hash = hashlib.sha256(f.read()).hexdigest()\n\n    # Insert document\n    doc_id = db.insert_document(\n        file_path=file_path,\n        title=file_name,\n        file_type=extraction[\"file_type\"],\n        file_size=file_stat.st_size,\n        file_hash=file_hash,\n    )\n    db.log(\"INFO\", f\"Created document: {doc_id}\")\n\n    # Vision analysis for PDFs\n    if extraction[\"file_type\"] == \".pdf\":\n        if progress:\n            progress.update_step(\"vision\")\n            progress.add_log(\"Vision-Analyse gestartet...\")\n\n        db.log(\"INFO\", f\"Running vision analysis for document {doc_id}\")\n        vision_config = {\n            \"model\": \"minicpm-v:latest\",\n            \"store_images\": True,\n            \"detect_images\": True,\n            \"detect_charts\": True,\n            \"detect_tables\": True,\n            \"max_pages\": VISION_MAX_PAGES,\n        }\n        vision_result = run_vision_step(doc_id, file_path, vision_config, progress=progress)\n        if vision_result[\"success\"]:\n            db.log(\"INFO\", f\"Vision: {vision_result['pages_analyzed']}\/{vision_result['pages_total']} pages analyzed\")\n            if progress:\n                progress.add_log(f\"Vision: {vision_result['pages_analyzed']} Seiten analysiert\")\n        else:\n            db.log(\"WARNING\", f\"Vision analysis failed: {vision_result.get('error')}\")\n\n    # Chunk content\n    if progress:\n      
  progress.update_step(\"chunk\")\n        if total_pages > 0:\n            progress.add_log(f\"Erstelle Chunks aus {total_pages} Seiten...\")\n        else:\n            progress.add_log(\"Erstelle Chunks...\")\n\n    chunks = chunk_by_structure(extraction)\n    db.log(\"INFO\", f\"Created {len(chunks)} chunks\")\n\n    # Store chunks\n    for i, chunk in enumerate(chunks):\n        chunk_id = db.insert_chunk(\n            doc_id=doc_id,\n            chunk_index=i,\n            content=chunk[\"content\"],\n            heading_path=json.dumps(chunk.get(\"heading_path\", [])),\n            position_start=chunk.get(\"position_start\", 0),\n            position_end=chunk.get(\"position_end\", 0),\n            metadata=json.dumps(chunk.get(\"metadata\", {})),\n        )\n        chunk[\"db_id\"] = chunk_id\n\n    if progress:\n        progress.add_log(f\"{len(chunks)} Chunks erstellt (aus {total_pages} Seiten)\" if total_pages > 0 else f\"{len(chunks)} Chunks erstellt\")\n\n    # Enrich chunks with vision context (for PDFs) - requires vision step\n    if extraction[\"file_type\"] == \".pdf\" and VISION_ENABLED:\n        if progress:\n            progress.update_step(\"enrich\")\n\n        db.log(\"INFO\", f\"Running vision enrichment for document {doc_id}\")\n        enrich_result = run_enrichment_step(doc_id)\n        if enrich_result[\"success\"]:\n            db.log(\"INFO\", f\"Enrichment: {enrich_result['enriched']}\/{enrich_result['total_chunks']} chunks enriched\")\n        else:\n            db.log(\"WARNING\", f\"Enrichment failed: {enrich_result.get('error')}\")\n\n    # Generate embeddings\n    if progress:\n        progress.update_step(\"embed\")\n        progress.add_log(f\"Erstelle Embeddings für {len(chunks)} Chunks...\")\n\n    embedded = embed_chunks(chunks, doc_id, file_name, file_path, progress=progress)\n    db.log(\"INFO\", f\"Embedded {embedded}\/{len(chunks)} chunks\")\n\n    if progress:\n        progress.add_log(f\"{embedded} Embeddings erstellt\")\n\n    # Semantic analysis\n    if progress:\n        progress.update_step(\"analyze\")\n        progress.add_log(\"Semantische Analyse...\")\n\n    full_text = get_full_text(extraction)\n    analysis = analyze_document(doc_id, full_text)\n    db.log(\"INFO\", f\"Analysis complete: {analysis}\")\n\n    # Update status\n    db.update_document_status(doc_id, \"done\")\n\n    if progress:\n        progress.add_log(f\"Fertig: {file_name}\")\n\n    return True, len(chunks), embedded\n\n\ndef process_queue():\n    \"\"\"Process items from the queue.\"\"\"\n    items = db.get_pending_queue_items(limit=10)\n    db.log(\"INFO\", f\"Found {len(items)} items in queue\")\n\n    for item in items:\n        queue_id = item[\"id\"]\n        file_path = item[\"file_path\"]\n        retry_count = item[\"retry_count\"]\n\n        if retry_count >= MAX_RETRIES:\n            db.update_queue_status(queue_id, \"failed\", \"Max retries exceeded\")\n            continue\n\n        db.update_queue_status(queue_id, \"processing\")\n\n        try:\n            success = process_file(file_path)\n            if success:\n                db.update_queue_status(queue_id, \"completed\")\n            else:\n                raise Exception(\"Processing returned False\")\n        except Exception as e:\n            error_msg = str(e)\n            db.update_queue_status(queue_id, \"pending\", error_msg)\n\n            # Exponential backoff\n            wait_time = RETRY_BACKOFF_BASE ** (retry_count + 1)\n            db.log(\"INFO\", f\"Retry {retry_count + 1} 
in {wait_time}s: {file_path}\")\n            time.sleep(wait_time)\n\n\ndef run_scan():\n    \"\"\"Scan for new documents.\"\"\"\n    files = scan_directory()\n    print(f\"Found {len(files)} files\")\n\n    if files:\n        queued = queue_files(files)\n        print(f\"Queued {queued} files\")\n\n    return files\n\n\ndef run_full_pipeline(run_id=None, pipeline_id=None):\n    \"\"\"Run complete pipeline: scan → process → embed.\"\"\"\n    progress = PipelineProgress(run_id) if run_id else None\n\n    print(\"=\" * 50)\n    print(\"KI-System Pipeline - Full Run\")\n    if run_id:\n        print(f\"Run ID: {run_id}, Pipeline ID: {pipeline_id}\")\n    print(\"=\" * 50)\n\n    try:\n        # Phase 1: Scan\n        if progress:\n            progress.update_step(\"detect\")\n            progress.add_log(\"Scanne nach Dokumenten...\")\n\n        print(\"\\n[1\/3] Scanning for documents...\")\n        files = scan_directory()\n        print(f\"Found {len(files)} files\")\n\n        if progress:\n            progress.add_log(f\"{len(files)} neue Dokumente gefunden\")\n\n        if files:\n            queued = queue_files(files)\n            print(f\"Queued {queued} files\")\n\n            # Phase 2: Process each file\n            items = db.get_pending_queue_items(limit=100)\n            print(f\"\\n[2\/3] Processing {len(items)} documents...\")\n\n            # Update total with actual queue count (may include items from previous runs)\n            if progress:\n                progress.update_progress(total=len(items))\n                progress.add_log(f\"{len(items)} Dokumente in Queue\")\n\n            total_chunks = 0\n            total_embeddings = 0\n            processed = 0\n            failed = 0\n\n            for item in items:\n                # Check if cancelled\n                if progress and progress.is_cancelled():\n                    progress.add_log(\"Pipeline abgebrochen durch Benutzer\")\n                    progress.complete(\"cancelled\")\n                    print(\"\\nPipeline cancelled by user\")\n                    return\n\n                queue_id = item[\"id\"]\n                file_path = item[\"file_path\"]\n                file_name = Path(file_path).name\n\n                if progress:\n                    progress.update_document(file_name)\n\n                db.update_queue_status(queue_id, \"processing\")\n\n                try:\n                    success, chunks, embedded = process_file(file_path, progress)\n                    if success:\n                        db.update_queue_status(queue_id, \"completed\")\n                        processed += 1\n                        total_chunks += chunks\n                        total_embeddings += embedded\n                    else:\n                        db.update_queue_status(queue_id, \"failed\", \"Processing failed\")\n                        failed += 1\n                except Exception as e:\n                    db.update_queue_status(queue_id, \"failed\", str(e))\n                    failed += 1\n                    if progress:\n                        progress.add_log(f\"FEHLER bei {file_name}: {str(e)[:50]}\")\n\n                if progress:\n                    progress.update_progress(\n                        processed=processed,\n                        failed=failed,\n                        chunks=total_chunks,\n                        embeddings=total_embeddings,\n                    )\n        else:\n... [TRUNCATED-60bfe7d4e3b10c3d]",
        "structuredPatch": [
            {
                "oldStart": 87,
                "oldLines": 7,
                "newStart": 87,
                "newLines": 6,
                "lines": [
                    "             \"detect_images\": True,",
                    "             \"detect_charts\": True,",
                    "             \"detect_tables\": True,",
                    "-            \"max_pages\": VISION_MAX_PAGES,",
                    "         }",
                    "         vision_result = run_vision_step(doc_id, file_path, vision_config, progress=progress)",
                    "         if vision_result[\"success\"]:"
                ]
            }
        ],
        "userModified": false,
        "replaceAll": false
    }
}
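
The structuredPatch entry is a unified-diff hunk rendered as JSON: one hunk replacing 7 lines starting at old line 87 with 6 lines, i.e. the removed "max_pages" line plus three lines of context on each side, with each entry in "lines" keeping its one-character diff marker (' ' context, '-' removed, '+' added). A minimal sketch of deriving hunks in this shape with Python's difflib; structured_patch is a hypothetical helper, not the tool's own diff code:

import difflib
import re

HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

def structured_patch(old_text: str, new_text: str) -> list[dict]:
    """Turn a unified diff into hunks with oldStart/oldLines/newStart/newLines."""
    diff = list(difflib.unified_diff(old_text.splitlines(), new_text.splitlines(), lineterm=""))
    hunks: list[dict] = []
    for line in diff[2:]:  # skip the ---/+++ file headers
        m = HUNK_HEADER.match(line)
        if m:
            hunks.append({
                "oldStart": int(m.group(1)), "oldLines": int(m.group(2) or 1),
                "newStart": int(m.group(3)), "newLines": int(m.group(4) or 1),
                "lines": [],
            })
        elif hunks:
            hunks[-1]["lines"].append(line)  # diff markers are kept, as in the log above

    return hunks

difflib's default of three context lines reproduces the 7-to-6 hunk shape seen above when the only change is deleting a single line.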