Protokoll #15133

ID: 15133
Zeitstempel: 2025-12-25 13:55:49.978605
Client: root
IP:
Warning: Undefined array key "request_ip" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13

Deprecated: htmlspecialchars(): Passing null to parameter #1 ($string) of type string is deprecated in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 13
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 2,143 (Input: 0, Output: 0)
Dauer: 115 ms
Request-Zeit:
Warning: Undefined array key "request_timestamp" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 18
Response-Zeit: -

Warning: Undefined array key "error_message" in /var/www/dev.campus.systemische-tools.de/src/View/protokoll/show.php on line 22

Request

{
    "event": "PreToolUse",
    "tool_name": "Write",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/generators\/rag_context.py",
        "content": "\"\"\"\nRAG Context Functions - Load context from Qdrant and semantic data.\n\"\"\"\n\nimport sys\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom db import db\nfrom embed import search_similar\n\n\ndef get_rag_context(briefing: str, collection: str = \"documents\", limit: int = 5) -> list[dict]:\n    \"\"\"\n    Get relevant context from Qdrant based on briefing.\n    Returns list of chunks with content and metadata.\n    \"\"\"\n    results = search_similar(briefing, collection=collection, limit=limit)\n\n    context_items = []\n    for result in results:\n        context_items.append(\n            {\n                \"content\": result[\"payload\"].get(\"content\", \"\"),\n                \"source\": result[\"payload\"].get(\"document_title\", \"Unknown\"),\n                \"score\": round(result[\"score\"], 4),\n                \"chunk_id\": result[\"payload\"].get(\"chunk_id\"),\n                \"document_id\": result[\"payload\"].get(\"document_id\"),\n            }\n        )\n\n    return context_items\n\n\ndef get_semantic_context(chunk_ids: list[int]) -> dict:\n    \"\"\"\n    Load entities and relations based on chunk_ids.\n\n    Uses the chunk_entities junction table to find relevant entities,\n    then loads relations between those entities.\n\n    Args:\n        chunk_ids: List of chunk IDs from RAG context\n\n    Returns:\n        dict with 'entities' and 'relations' lists\n    \"\"\"\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    # Filter out None values\n    chunk_ids = [cid for cid in chunk_ids if cid is not None]\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    placeholders = \", \".join([\"%s\"] * len(chunk_ids))\n\n    # Load entities via chunk_entities\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT e.id, e.name, e.type, e.description,\n              AVG(ce.relevance_score) as relevance\n           FROM chunk_entities ce\n    
       JOIN entities e ON ce.entity_id = e.id\n           WHERE ce.chunk_id IN ({placeholders})\n           GROUP BY e.id, e.name, e.type, e.description\n           ORDER BY relevance DESC\n           LIMIT 10\"\"\",\n        tuple(chunk_ids),\n    )\n    entities = cursor.fetchall()\n    cursor.close()\n\n    if not entities:\n        return {\"entities\": [], \"relations\": []}\n\n    # Get entity IDs for relation lookup\n    entity_ids = [e[\"id\"] for e in entities]\n    entity_placeholders = \", \".join([\"%s\"] * len(entity_ids))\n\n    # Load relations between found entities\n    cursor = db.execute(\n        f\"\"\"SELECT e1.name as source, er.relation_type, e2.name as target\n           FROM entity_relations er\n           JOIN entities e1 ON er.source_entity_id = e1.id\n           JOIN entities e2 ON er.target_entity_id = e2.id\n           WHERE e1.id IN ({entity_placeholders}) AND e2.id IN ({entity_placeholders})\n           LIMIT 15\"\"\",\n        tuple(entity_ids) + tuple(entity_ids),\n    )\n    relations = cursor.fetchall()\n    cursor.close()\n\n    return {\"entities\": entities, \"relations\": relations}\n\n\ndef get_taxonomy_context(document_ids: list[int]) -> list[dict]:\n    \"\"\"\n    Load taxonomy terms for documents.\n\n    Args:\n        document_ids: List of document IDs from RAG context\n\n    Returns:\n        List of taxonomy term dicts with name, slug, confidence\n    \"\"\"\n    if not document_ids:\n        return []\n\n    # Filter out None values\n    document_ids = [did for did in document_ids if did is not None]\n    if not document_ids:\n        return []\n\n    placeholders = \", \".join([\"%s\"] * len(document_ids))\n\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT tt.name, tt.slug, MAX(dt.confidence) as confidence\n           FROM document_taxonomy dt\n           JOIN taxonomy_terms tt ON dt.taxonomy_term_id = tt.id\n           WHERE dt.document_id IN ({placeholders})\n           GROUP BY tt.id, tt.name, tt.slug\n 
          ORDER BY confidence DESC\"\"\",\n        tuple(document_ids),\n    )\n    taxonomy = cursor.fetchall()\n    cursor.close()\n\n    return taxonomy\n"
    }
}

Response

{
    "tool_response": {
        "type": "create",
        "filePath": "\/var\/www\/scripts\/pipeline\/generators\/rag_context.py",
        "content": "\"\"\"\nRAG Context Functions - Load context from Qdrant and semantic data.\n\"\"\"\n\nimport sys\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom db import db\nfrom embed import search_similar\n\n\ndef get_rag_context(briefing: str, collection: str = \"documents\", limit: int = 5) -> list[dict]:\n    \"\"\"\n    Get relevant context from Qdrant based on briefing.\n    Returns list of chunks with content and metadata.\n    \"\"\"\n    results = search_similar(briefing, collection=collection, limit=limit)\n\n    context_items = []\n    for result in results:\n        context_items.append(\n            {\n                \"content\": result[\"payload\"].get(\"content\", \"\"),\n                \"source\": result[\"payload\"].get(\"document_title\", \"Unknown\"),\n                \"score\": round(result[\"score\"], 4),\n                \"chunk_id\": result[\"payload\"].get(\"chunk_id\"),\n                \"document_id\": result[\"payload\"].get(\"document_id\"),\n            }\n        )\n\n    return context_items\n\n\ndef get_semantic_context(chunk_ids: list[int]) -> dict:\n    \"\"\"\n    Load entities and relations based on chunk_ids.\n\n    Uses the chunk_entities junction table to find relevant entities,\n    then loads relations between those entities.\n\n    Args:\n        chunk_ids: List of chunk IDs from RAG context\n\n    Returns:\n        dict with 'entities' and 'relations' lists\n    \"\"\"\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    # Filter out None values\n    chunk_ids = [cid for cid in chunk_ids if cid is not None]\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    placeholders = \", \".join([\"%s\"] * len(chunk_ids))\n\n    # Load entities via chunk_entities\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT e.id, e.name, e.type, e.description,\n              AVG(ce.relevance_score) as relevance\n           FROM chunk_entities ce\n    
       JOIN entities e ON ce.entity_id = e.id\n           WHERE ce.chunk_id IN ({placeholders})\n           GROUP BY e.id, e.name, e.type, e.description\n           ORDER BY relevance DESC\n           LIMIT 10\"\"\",\n        tuple(chunk_ids),\n    )\n    entities = cursor.fetchall()\n    cursor.close()\n\n    if not entities:\n        return {\"entities\": [], \"relations\": []}\n\n    # Get entity IDs for relation lookup\n    entity_ids = [e[\"id\"] for e in entities]\n    entity_placeholders = \", \".join([\"%s\"] * len(entity_ids))\n\n    # Load relations between found entities\n    cursor = db.execute(\n        f\"\"\"SELECT e1.name as source, er.relation_type, e2.name as target\n           FROM entity_relations er\n           JOIN entities e1 ON er.source_entity_id = e1.id\n           JOIN entities e2 ON er.target_entity_id = e2.id\n           WHERE e1.id IN ({entity_placeholders}) AND e2.id IN ({entity_placeholders})\n           LIMIT 15\"\"\",\n        tuple(entity_ids) + tuple(entity_ids),\n    )\n    relations = cursor.fetchall()\n    cursor.close()\n\n    return {\"entities\": entities, \"relations\": relations}\n\n\ndef get_taxonomy_context(document_ids: list[int]) -> list[dict]:\n    \"\"\"\n    Load taxonomy terms for documents.\n\n    Args:\n        document_ids: List of document IDs from RAG context\n\n    Returns:\n        List of taxonomy term dicts with name, slug, confidence\n    \"\"\"\n    if not document_ids:\n        return []\n\n    # Filter out None values\n    document_ids = [did for did in document_ids if did is not None]\n    if not document_ids:\n        return []\n\n    placeholders = \", \".join([\"%s\"] * len(document_ids))\n\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT tt.name, tt.slug, MAX(dt.confidence) as confidence\n           FROM document_taxonomy dt\n           JOIN taxonomy_terms tt ON dt.taxonomy_term_id = tt.id\n           WHERE dt.document_id IN ({placeholders})\n           GROUP BY tt.id, tt.name, tt.slug\n 
          ORDER BY confidence DESC\"\"\",\n        tuple(document_ids),\n    )\n    taxonomy = cursor.fetchall()\n    cursor.close()\n\n    return taxonomy\n",
        "structuredPatch": [],
        "originalFile": null
    }
}
← Vorheriger Zur Liste Nächster →