Log #32288

ID: 32288
Timestamp: 2026-01-02 03:38:52.718953
Client: root
IP: 145.224.72.245
Model: claude-sonnet-4-20250514
Status: pending
Tokens: 2,671 (Input: 2,671, Output: 0)
Duration: -
Request time: 2026-01-02 03:38:52.718953
Response time: -

Request

{
    "event": "PostToolUse",
    "tool_name": "Read",
    "tool_response": {
        "type": "text",
        "file": {
            "filePath": "/var/www/migration/content-pipeline/src/pipeline.py",
            "content": "(Python source, decoded below)",
            "numLines": 662,
            "startLine": 1,
            "totalLines": 662
        }
    }
}

Decoded "content" (pipeline.py, lines 1-662; the logged string ends at the truncation marker):

#!/usr/bin/env python3
"""
KI-System Document Pipeline
Main orchestration script for document processing.

Usage:
    python pipeline.py scan             # Scan for new documents
    python pipeline.py process          # Process queued documents
    python pipeline.py embed            # Embed pending chunks
    python pipeline.py semantic <id>    # Run semantic analysis on document
    python pipeline.py semantic-queue   # Process semantic queue
    python pipeline.py enrich-entities  # Enrich entity descriptions via Ollama
    python pipeline.py enrich-entities 50  # Limit to 50 entities
    python pipeline.py all              # Full pipeline run
    python pipeline.py all --pipeline-id=1 --run-id=5  # With tracking
    python pipeline.py file <path>      # Process single file
    python pipeline.py status           # Show pipeline status
"""

import argparse
import os
import time
from pathlib import Path

from config import (
    MAX_RETRIES,
    RETRY_BACKOFF_BASE,
    SEMANTIC_AUTO_QUEUE,
    SEMANTIC_SYNC,
    SEMANTIC_USE_ANTHROPIC,
)
from constants import DEFAULT_LIMIT
from db import PipelineProgress, db
from detect import queue_files, scan_directory
from pipeline_config import get_step_model
from step_embed import EmbeddingStep
from step_entity_enrich import EntityEnrichStep
from step_extract import ExtractionStep
from step_load import LoadStep
from step_semantic import SemanticStep
from step_semantic_extended import (
    DuplicateCheckStep,
    KnowledgeSemanticAnalyzeStep,
    KnowledgeSemanticStoreStep,
    TextSemanticAnalyzeStep,
    TextSemanticStoreStep,
)
from step_transform import TransformationStep


def process_file(file_path, progress=None):
    """Process a single file through the pipeline."""
    file_name = Path(file_path).name

    if progress:
        progress.update_document(file_name)

    # Initialize pipeline steps
    extract_step = ExtractionStep(db, progress)
    load_step = LoadStep(db, progress)
    transform_step = TransformationStep(db, progress)
    embed_step = EmbeddingStep(db, progress)

    # Check if cancelled before starting
    if progress and progress.is_cancelled():
        return "cancelled", 0, 0

    # Step 1: Extract
    extract_result = extract_step.execute(file_path)
    if not extract_result["success"]:
        if extract_result.get("error") == "cancelled":
            return "cancelled", 0, 0
        return False, 0, 0

    extraction = extract_result["extraction"]
    file_info = extract_result["file_info"]
    total_pages = extract_result.get("total_pages", 0)

    # Check if cancelled after extraction
    if progress and progress.is_cancelled():
        return "cancelled", 0, 0

    # Step 2: Load document
    doc_id = load_step.create_document(file_info)

    # Step 3: Store pages (PDFs and multi-page documents)
    page_map = load_step.store_pages(doc_id, extraction)

    # Step 4: Vision analysis (PDFs only)
    if file_info["type"] == ".pdf":
        transform_step.execute_vision(doc_id, file_path, file_info["type"])

        # Check if cancelled after vision
        if progress and progress.is_cancelled():
            return "cancelled", 0, 0

    # Step 5: Chunking
    chunks = transform_step.execute_chunking(extraction, total_pages)

    # Step 6: Store chunks with page references
    chunks = load_step.store_chunks(doc_id, chunks, page_map)

    # Check if cancelled after chunking
    if progress and progress.is_cancelled():
        return "cancelled", len(chunks), 0

    # Step 7: Enrichment (PDFs only)
    if file_info["type"] == ".pdf":
        transform_step.execute_enrichment(doc_id, file_info["type"])

        # Check if cancelled after enrichment
        if progress and progress.is_cancelled():
            return "cancelled", len(chunks), 0

    # Step 8: Embeddings (Layer 3 - Document becomes searchable)
    embedded = embed_step.execute(chunks, doc_id, file_name, file_path)

    # Document is now searchable - update status to "embedded"
    load_step.update_document_status(doc_id, "embedded")

    if progress:
        progress.add_log(f"Layer 3 fertig: {file_name} ist jetzt suchbar")

    # Check if cancelled after embedding
    if progress and progress.is_cancelled():
        return "cancelled", len(chunks), embedded

    # Step 9: Semantic analysis (Layer 4 - Optional/Async)
    semantic_step = SemanticStep(db, progress)
    full_text = extract_step.get_full_text_from_extraction(extraction)

    if SEMANTIC_SYNC:
        # Run semantic analysis synchronously
        try:
            semantic_step.execute(doc_id, full_text, use_anthropic=SEMANTIC_USE_ANTHROPIC)
            # Update to done only after semantic completes
            load_step.update_document_status(doc_id, "done")
        except Exception as e:
            # Semantic failed but document is still searchable
            db.log("WARNING", f"Semantic analysis failed for {file_name}: {e}")
            if progress:
                progress.add_log(f"Semantik-Fehler (Dokument bleibt suchbar): {str(e)[:50]}")
    elif SEMANTIC_AUTO_QUEUE:
        # Queue for async processing
        semantic_step.queue(doc_id, priority=5)
        load_step.update_document_status(doc_id, "done")
        if progress:
            progress.add_log(f"Semantik in Queue: {file_name}")
    else:
        # No semantic analysis
        load_step.update_document_status(doc_id, "done")

    if progress:
        progress.add_log(f"Fertig: {file_name}")

    return True, len(chunks), embedded


def process_file_v5(file_path, progress=None):
    """Process a single file through Pipeline #5 (Scientific Pipeline).

    Key difference from process_file():
    - Semantic analysis happens BEFORE embedding (scientifically correct)
    - Uses extended semantic steps for text and knowledge semantics
    """
    file_name = Path(file_path).name

    if progress:
        progress.update_document(file_name)

    # Initialize pipeline steps
    extract_step = ExtractionStep(db, progress)
    load_step = LoadStep(db, progress)
    transform_step = TransformationStep(db, progress)
    embed_step = EmbeddingStep(db, progress)
    text_semantic_analyze = TextSemanticAnalyzeStep(db, progress)
    text_semantic_store = TextSemanticStoreStep(db, progress)
    knowledge_semantic_analyze = KnowledgeSemanticAnalyzeStep(db, progress)
    knowledge_semantic_store = KnowledgeSemanticStoreStep(db, progress)
    duplicate_check = DuplicateCheckStep(db, progress)

    # Check if cancelled before starting
    if progress and progress.is_cancelled():
        return "cancelled", 0, 0

    # Phase 1: Existenz - Extract
    extract_result = extract_step.execute(file_path)
    if not extract_result["success"]:
        if extract_result.get("error") == "cancelled":
            return "cancelled", 0, 0
        return False, 0, 0

    extraction = extract_result["extraction"]
    file_info = extract_result["file_info"]
    total_pages = extract_result.get("total_pages", 0)
    content_hash = file_info.get("hash", "")

    # Check if cancelled after extraction
    if progress and progress.is_cancelled():
        return "cancelled", 0, 0

    # Phase 1: Existenz - Load document
    doc_id = load_step.create_document(file_info)

    # Phase 1: Existenz - Duplicate check
    dup_result = duplicate_check.execute(doc_id, content_hash)
    if dup_result["status"] == "abort":
        load_step.update_document_status(doc_id, "duplicate")
        if progress:
            progress.add_log(f"Duplikat: {file_name} = Doc #{dup_result['duplicate_id']}")
        return True, 0, 0  # Not an error, just skip

    # Phase 2: Text - Store pages
    page_map = load_step.store_pages(doc_id, extraction)

    # Phase 2: Text - Vision analysis (PDFs only)
    if file_info["type"] == ".pdf":
        transform_step.execute_vision(doc_id, file_path, file_info["type"])
        if progress and progress.is_cancelled():
            return "cancelled", 0, 0

    # Phase 3: Struktur - Chunking
    chunks = transform_step.execute_chunking(extraction, total_pages)

    # Phase 3: Struktur - Store chunks with page references
    chunks = load_step.store_chunks(doc_id, chunks, page_map)

    if progress and progress.is_cancelled():
        return "cancelled", len(chunks), 0

    # Phase 3: Struktur - Enrichment (PDFs only)
    if file_info["type"] == ".pdf":
        transform_step.execute_enrichment(doc_id, file_info["type"])
        if progress and progress.is_cancelled():
            return "cancelled", len(chunks), 0

    # Phase 4: Textsemantik - Analyze chunks
    if progress:
        progress.add_log("Phase 4: Textsemantik...")

    # Prepare chunks for analysis
    chunk_data = [{"id": c["id"], "content": c["content"]} for c in chunks]
    text_model = get_step_model("text_semantic_analyze")
    analyzed_chunks = text_semantic_analyze.execute(chunk_data, {"model": text_model})

    # Store text semantics
    text_semantic_store.execute(analyzed_chunks, {})

    if progress and progress.is_cancelled():
        return "cancelled", len(chunks), 0

    # Phase 5-6: Entity + Wissenssemantik
    if progress:
        progress.add_log("Phase 5-6: Entity-Extraktion + Wissenssemantik...")

    # Run standard semantic analysis (entities, relations, taxonomy)
    semantic_step = SemanticStep(db, progress)
    full_text = extract_step.get_full_text_from_extraction(extraction)

    try:
        semantic_step.execute(doc_id, full_text, use_anthropic=SEMANTIC_USE_ANTHROPIC)
    except Exception as e:
        db.log("WARNING", f"Semantic analysis failed for {file_name}: {e}")
        if progress:
            progress.add_log(f"Semantik-Warnung: {str(e)[:50]}")

    # Load entities for knowledge semantics
    cursor = db.execute(
        """SELECT e.id, e.name, e.type, c.content as context
           FROM entities e
           JOIN chunk_entities ce ON e.id = ce.entity_id
           JOIN chunks c ON ce.chunk_id = c.id
           WHERE c.document_id = %s
           GROUP BY e.id""",
        (do... [TRUNCATED-6a1882581643ee40]
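
The request body is a PostToolUse hook event: an event name, the tool_name, and a tool_response whose file record carries the result of the Read. A minimal sketch of extracting that record from a raw log line, using only the standard library; the JSON field names are taken from the payload above, while ReadFilePayload and parse_read_event are hypothetical names chosen for illustration:

import json
from dataclasses import dataclass


@dataclass
class ReadFilePayload:
    file_path: str
    num_lines: int
    start_line: int
    total_lines: int
    content: str


def parse_read_event(raw: str):
    """Return the file record from a PostToolUse/Read entry, or None."""
    event = json.loads(raw)
    if event.get("event") != "PostToolUse" or event.get("tool_name") != "Read":
        return None
    f = event["tool_response"]["file"]
    return ReadFilePayload(
        file_path=f["filePath"],
        num_lines=f["numLines"],
        start_line=f["startLine"],
        total_lines=f["totalLines"],
        content=f["content"],
    )

parse_read_event returns None for any entry that is not a PostToolUse/Read pair, so it can be mapped over a mixed log stream.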
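
Both process_file() and process_file_v5() in the decoded source return a (status, chunk_count, embedded_count) tuple, where status is True on success, False on extraction failure, or the string "cancelled". Since "cancelled" is truthy, a bare "if status:" check would misread a cancellation as success. A hedged caller sketch under that contract; run_single is a hypothetical name, process_file comes from the logged module:

from pipeline import process_file  # the module shown in the log above


def run_single(path, progress=None):
    # status is True (done), False (extraction failed), or "cancelled";
    # compare explicitly -- the string "cancelled" is truthy.
    status, chunk_count, embedded_count = process_file(path, progress)
    if status == "cancelled":
        return f"cancelled: {path} ({chunk_count} chunks, {embedded_count} embedded)"
    if status is False:
        return f"failed: {path}"
    return f"done: {path} ({chunk_count} chunks, {embedded_count} embedded)"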

Response

-