Protokoll #13368

ID: 13368
Zeitstempel: 2025-12-25 00:23:49.528882
Client: root
IP: 145.224.72.140
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 5,467 (Input: 926, Output: 4,541)
Dauer: 94 ms
Request-Zeit: 2025-12-25 00:23:49.528882
Response-Zeit: 2025-12-25 00:23:49.622675

Request

{
    "event": "PreToolUse",
    "tool_name": "Edit",
    "tool_input": {
        "file_path": "\/var\/www\/scripts\/pipeline\/generate.py",
        "old_string": "    if result:\n        config = json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n        return {\"name\": result[\"name\"], \"config\": config}\n    return None\n\n\ndef get_author_profile(profile_id):",
        "new_string": "    if result:\n        config = json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n        return {\"name\": result[\"name\"], \"config\": config}\n    return None\n\n\ndef get_semantic_context(chunk_ids):\n    \"\"\"\n    Load entities and relations based on chunk_ids.\n\n    Uses the chunk_entities junction table to find relevant entities,\n    then loads relations between those entities.\n\n    Args:\n        chunk_ids: List of chunk IDs from RAG context\n\n    Returns:\n        dict with 'entities' and 'relations' lists\n    \"\"\"\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    # Filter out None values\n    chunk_ids = [cid for cid in chunk_ids if cid is not None]\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    placeholders = \", \".join([\"%s\"] * len(chunk_ids))\n\n    # Load entities via chunk_entities\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT e.id, e.name, e.type, e.description,\n                  AVG(ce.relevance_score) as relevance\n           FROM chunk_entities ce\n           JOIN entities e ON ce.entity_id = e.id\n           WHERE ce.chunk_id IN ({placeholders})\n           GROUP BY e.id, e.name, e.type, e.description\n           ORDER BY relevance DESC\n           LIMIT 10\"\"\",\n        tuple(chunk_ids),\n    )\n    entities = cursor.fetchall()\n    cursor.close()\n\n    if not entities:\n        return {\"entities\": [], \"relations\": []}\n\n    # Get entity IDs for relation lookup\n    entity_ids = [e[\"id\"] for e in entities]\n    entity_placeholders = \", \".join([\"%s\"] * len(entity_ids))\n\n    # Load relations between found entities\n    cursor = db.execute(\n        f\"\"\"SELECT e1.name as source, er.relation_type, e2.name as target\n           FROM entity_relations er\n           JOIN entities e1 ON er.source_entity_id = e1.id\n           JOIN entities e2 ON er.target_entity_id = e2.id\n           WHERE e1.id IN ({entity_placeholders}) AND e2.id IN ({entity_placeholders})\n           LIMIT 15\"\"\",\n        tuple(entity_ids) + tuple(entity_ids),\n    )\n    relations = cursor.fetchall()\n    cursor.close()\n\n    return {\"entities\": entities, \"relations\": relations}\n\n\ndef get_taxonomy_context(document_ids):\n    \"\"\"\n    Load taxonomy terms for documents.\n\n    Args:\n        document_ids: List of document IDs from RAG context\n\n    Returns:\n        List of taxonomy term dicts with name, slug, confidence\n    \"\"\"\n    if not document_ids:\n        return []\n\n    # Filter out None values\n    document_ids = [did for did in document_ids if did is not None]\n    if not document_ids:\n        return []\n\n    placeholders = \", \".join([\"%s\"] * len(document_ids))\n\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT tt.name, tt.slug, MAX(dt.confidence) as confidence\n           FROM document_taxonomy dt\n           JOIN taxonomy_terms tt ON dt.taxonomy_term_id = tt.id\n           WHERE dt.document_id IN ({placeholders})\n           GROUP BY tt.id, tt.name, tt.slug\n           ORDER BY confidence DESC\"\"\",\n        tuple(document_ids),\n    )\n    taxonomy = cursor.fetchall()\n    cursor.close()\n\n    return taxonomy\n\n\ndef get_author_profile(profile_id):"
    }
}

Response

{
    "tool_response": {
        "filePath": "\/var\/www\/scripts\/pipeline\/generate.py",
        "oldString": "    if result:\n        config = json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n        return {\"name\": result[\"name\"], \"config\": config}\n    return None\n\n\ndef get_author_profile(profile_id):",
        "newString": "    if result:\n        config = json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n        return {\"name\": result[\"name\"], \"config\": config}\n    return None\n\n\ndef get_semantic_context(chunk_ids):\n    \"\"\"\n    Load entities and relations based on chunk_ids.\n\n    Uses the chunk_entities junction table to find relevant entities,\n    then loads relations between those entities.\n\n    Args:\n        chunk_ids: List of chunk IDs from RAG context\n\n    Returns:\n        dict with 'entities' and 'relations' lists\n    \"\"\"\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    # Filter out None values\n    chunk_ids = [cid for cid in chunk_ids if cid is not None]\n    if not chunk_ids:\n        return {\"entities\": [], \"relations\": []}\n\n    placeholders = \", \".join([\"%s\"] * len(chunk_ids))\n\n    # Load entities via chunk_entities\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT e.id, e.name, e.type, e.description,\n                  AVG(ce.relevance_score) as relevance\n           FROM chunk_entities ce\n           JOIN entities e ON ce.entity_id = e.id\n           WHERE ce.chunk_id IN ({placeholders})\n           GROUP BY e.id, e.name, e.type, e.description\n           ORDER BY relevance DESC\n           LIMIT 10\"\"\",\n        tuple(chunk_ids),\n    )\n    entities = cursor.fetchall()\n    cursor.close()\n\n    if not entities:\n        return {\"entities\": [], \"relations\": []}\n\n    # Get entity IDs for relation lookup\n    entity_ids = [e[\"id\"] for e in entities]\n    entity_placeholders = \", \".join([\"%s\"] * len(entity_ids))\n\n    # Load relations between found entities\n    cursor = db.execute(\n        f\"\"\"SELECT e1.name as source, er.relation_type, e2.name as target\n           FROM entity_relations er\n           JOIN entities e1 ON er.source_entity_id = e1.id\n           JOIN entities e2 ON er.target_entity_id = e2.id\n           WHERE e1.id IN ({entity_placeholders}) AND e2.id IN ({entity_placeholders})\n           LIMIT 15\"\"\",\n        tuple(entity_ids) + tuple(entity_ids),\n    )\n    relations = cursor.fetchall()\n    cursor.close()\n\n    return {\"entities\": entities, \"relations\": relations}\n\n\ndef get_taxonomy_context(document_ids):\n    \"\"\"\n    Load taxonomy terms for documents.\n\n    Args:\n        document_ids: List of document IDs from RAG context\n\n    Returns:\n        List of taxonomy term dicts with name, slug, confidence\n    \"\"\"\n    if not document_ids:\n        return []\n\n    # Filter out None values\n    document_ids = [did for did in document_ids if did is not None]\n    if not document_ids:\n        return []\n\n    placeholders = \", \".join([\"%s\"] * len(document_ids))\n\n    cursor = db.execute(\n        f\"\"\"SELECT DISTINCT tt.name, tt.slug, MAX(dt.confidence) as confidence\n           FROM document_taxonomy dt\n           JOIN taxonomy_terms tt ON dt.taxonomy_term_id = tt.id\n           WHERE dt.document_id IN ({placeholders})\n           GROUP BY tt.id, tt.name, tt.slug\n           ORDER BY confidence DESC\"\"\",\n        tuple(document_ids),\n    )\n    taxonomy = cursor.fetchall()\n    cursor.close()\n\n    return taxonomy\n\n\ndef get_author_profile(profile_id):",
        "originalFile": "#!\/usr\/bin\/env python3\n\"\"\"\nContent Generation for KI-System\nGenerates content using RAG context, author profiles, and contracts.\n\"\"\"\n\nimport json\nimport sys\n\nsys.path.insert(0, \"\/var\/www\/scripts\/pipeline\")\n\nfrom config import ANTHROPIC_API_KEY, ANTHROPIC_MODEL, OLLAMA_CHAT_MODEL, OLLAMA_HOST  # noqa: I001, E402\nfrom db import db  # noqa: E402\nfrom embed import search_similar  # noqa: E402\n\n\ndef _repair_json(json_str):\n    \"\"\"\n    Attempt to repair common JSON issues from LLM output.\n\n    Fixes:\n    - Unescaped quotes in strings\n    - Missing commas between array elements\n    - Trailing commas\n    - Control characters in strings\n    \"\"\"\n    import re\n\n    # Remove control characters except newlines and tabs\n    json_str = re.sub(r\"[\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f]\", \"\", json_str)\n\n    # Fix common issue: missing comma before closing bracket in arrays\n    json_str = re.sub(r'\"\\s*\\n\\s*]', '\"\\n]', json_str)\n\n    # Fix trailing commas before closing brackets\/braces\n    json_str = re.sub(r\",\\s*}\", \"}\", json_str)\n    json_str = re.sub(r\",\\s*]\", \"]\", json_str)\n\n    # Fix missing commas between array elements (string followed by string)\n    json_str = re.sub(r'\"\\s*\\n\\s*\"', '\",\\n\"', json_str)\n\n    # Fix unescaped quotes within strings (heuristic: quotes not at boundaries)\n    # This is tricky, so we do a simple fix for common patterns\n    lines = json_str.split(\"\\n\")\n    fixed_lines = []\n    for line in lines:\n        # Count quotes - if odd number and line has content, try to fix\n        quote_count = line.count('\"') - line.count('\\\\\"')\n        if quote_count % 2 != 0 and \":\" in line:\n            # Try to escape internal quotes (very basic heuristic)\n            parts = line.split(\":\", 1)\n            if len(parts) == 2:\n                key_part = parts[0]\n                value_part = parts[1]\n                # If value has odd quotes, 
try to balance\n                if value_part.count('\"') % 2 != 0:\n                    # Add escaped quote or remove problematic char\n                    value_part = value_part.rstrip().rstrip(\",\")\n                    if not value_part.endswith('\"'):\n                        value_part += '\"'\n                    line = key_part + \":\" + value_part\n        fixed_lines.append(line)\n\n    return \"\\n\".join(fixed_lines)\n\n\ndef get_prompt(name):\n    \"\"\"Load prompt from database by name.\"\"\"\n    cursor = db.execute(\n        \"\"\"SELECT content FROM prompts WHERE name = %s AND is_active = 1 ORDER BY version DESC LIMIT 1\"\"\",\n        (name,),\n    )\n    result = cursor.fetchone()\n    cursor.close()\n    return result[\"content\"] if result else None\n\n\ndef get_rag_context(briefing, collection=\"documents\", limit=5):\n    \"\"\"\n    Get relevant context from Qdrant based on briefing.\n    Returns list of chunks with content and metadata.\n    \"\"\"\n    results = search_similar(briefing, collection=collection, limit=limit)\n\n    context_items = []\n    for result in results:\n        context_items.append(\n            {\n                \"content\": result[\"payload\"].get(\"content\", \"\"),\n                \"source\": result[\"payload\"].get(\"document_title\", \"Unknown\"),\n                \"score\": round(result[\"score\"], 4),\n                \"chunk_id\": result[\"payload\"].get(\"chunk_id\"),\n                \"document_id\": result[\"payload\"].get(\"document_id\"),\n            }\n        )\n\n    return context_items\n\n\ndef get_config_item(item_id, item_type):\n    \"\"\"Load configuration item from content_config table.\"\"\"\n    if not item_id:\n        return None\n\n    cursor = db.execute(\n        \"SELECT name, content FROM content_config WHERE id = %s AND type = %s AND status = 'active'\",\n        (item_id, item_type),\n    )\n    result = cursor.fetchone()\n    cursor.close()\n\n    if result:\n        config = 
json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n        return {\"name\": result[\"name\"], \"config\": config}\n    return None\n\n\ndef get_author_profile(profile_id):\n    \"\"\"Load author profile from database.\"\"\"\n    return get_config_item(profile_id, \"author_profile\")\n\n\ndef get_contract(contract_id):\n    \"\"\"Load content contract from database.\"\"\"\n    return get_config_item(contract_id, \"contract\")\n\n\ndef get_structure(structure_id):\n    \"\"\"Load content structure from database.\"\"\"\n    result = get_config_item(structure_id, \"structure\")\n    if result:\n        # Structure has additional 'type' field in config\n        result[\"type\"] = result[\"config\"].get(\"type\", \"article\")\n    return result\n\n\ndef get_order(order_id):\n    \"\"\"Load content order with all related data.\"\"\"\n    cursor = db.execute(\n        \"\"\"SELECT co.*,\n                  ap.name as profile_name, ap.content as profile_config,\n                  cc.name as contract_name, cc.content as contract_config,\n                  cs.name as structure_name, cs.content as structure_config\n           FROM content_orders co\n           LEFT JOIN content_config ap ON co.author_profile_id = ap.id AND ap.type = 'author_profile'\n           LEFT JOIN content_config cc ON co.contract_id = cc.id AND cc.type = 'contract'\n           LEFT JOIN content_config cs ON co.structure_id = cs.id AND cs.type = 'structure'\n           WHERE co.id = %s\"\"\",\n        (order_id,),\n    )\n    result = cursor.fetchone()\n    cursor.close()\n    return result\n\n\ndef _parse_new_author_profile(config):\n    \"\"\"Parse new-style author profile (Cary format) into prompt text.\"\"\"\n    sections = []\n\n    # Haltung\n    haltung = config.get(\"haltung\", {})\n    if haltung:\n        sections.append(f\"\"\"### Haltung:\n- Grundhaltung: {haltung.get(\"grundhaltung\", \"\")}\n- Ausrichtung: {haltung.get(\"ausrichtung\", 
\"\")}\n- Spannungstoleranz: {haltung.get(\"spannungstoleranz\", \"\")}\n- Vereinfachung: {haltung.get(\"vereinfachung\", \"\")}\"\"\")\n\n    # Tonalität\n    tonalitaet = config.get(\"tonalitaet\", {})\n    if tonalitaet:\n        sections.append(f\"\"\"### Tonalität:\n- Charakter: {tonalitaet.get(\"charakter\", \"\")}\n- Stil: {tonalitaet.get(\"stil\", \"\")}\n- Wirkung: {tonalitaet.get(\"wirkung\", \"\")}\n- Abgrenzung: {tonalitaet.get(\"abgrenzung\", \"\")}\"\"\")\n\n    # Sprachmodus\n    sprachmodus = config.get(\"sprachmodus\", {})\n    if sprachmodus:\n        sections.append(f\"\"\"### Sprachmodus:\n- Denkstil: {sprachmodus.get(\"denkstil\", \"\")}\n- Aussagenform: {sprachmodus.get(\"aussagenform\", \"\")}\n- Fragenfunktion: {sprachmodus.get(\"fragenfunktion\", \"\")}\"\"\")\n\n    # Grammatik und Satzbau - WICHTIG für Verbote\n    grammatik = config.get(\"grammatik_und_satzbau\", {})\n    if grammatik:\n        verbote = []\n        if grammatik.get(\"stakkato\") == \"ausgeschlossen\":\n            verbote.append(\"Stakkato-Sätze\")\n        if grammatik.get(\"einschuebe\") == \"keine\":\n            verbote.append(\"Einschübe\")\n        if grammatik.get(\"gedankenstriche\") == \"verboten\":\n            verbote.append(\"Gedankenstriche (–)\")\n\n        sections.append(f\"\"\"### Grammatik und Satzbau:\n- Sätze: {grammatik.get(\"saetze\", \"\")}\n- Rhythmus: {grammatik.get(\"rhythmus\", \"\")}\n- **VERBOTEN:** {\", \".join(verbote) if verbote else \"keine\"}\"\"\")\n\n    # Wortwahl\n    wortwahl = config.get(\"wortwahl\", {})\n    if wortwahl:\n        verboten = []\n        if wortwahl.get(\"buzzwords\") == \"ausgeschlossen\":\n            verboten.append(\"Buzzwords\")\n        if wortwahl.get(\"methodennamen\") == \"ausgeschlossen\":\n            verboten.append(\"Methodennamen\")\n\n        sections.append(f\"\"\"### Wortwahl:\n- Niveau: {wortwahl.get(\"niveau\", \"\")}\n- Begriffe: {wortwahl.get(\"begriffe\", \"\")}\n- **VERBOTEN:** {\", 
\".join(verboten) if verboten else \"keine\"}\"\"\")\n\n    # Adressierung\n    adressierung = config.get(\"adressierung\", {})\n    if adressierung:\n        sections.append(f\"\"\"### Adressierung:\n- Form: {adressierung.get(\"form\", \"Sie\")}\n- Beziehung: {adressierung.get(\"beziehung\", \"\")}\n- Einladung: {adressierung.get(\"einladung\", \"\")}\"\"\")\n\n    # Metaphern\n    metaphern = config.get(\"metaphern\", {})\n    if metaphern:\n        sections.append(f\"\"\"### Metaphern:\n- Einsatz: {metaphern.get(\"einsatz\", \"\")}\n- Herkunft: {metaphern.get(\"herkunft\", \"\")}\n- Konsistenz: {metaphern.get(\"konsistenz\", \"\")}\"\"\")\n\n    return \"\\n\\n\".join(sections)\n\n\ndef _parse_new_structure(config):\n    \"\"\"Parse new-style structure profile into prompt text and format info.\"\"\"\n    sections = []\n    output_format = None\n    erlaubte_tags = []\n\n    # Ausgabe-Format\n    ausgabe = config.get(\"ausgabe\", {})\n    if ausgabe:\n        output_format = ausgabe.get(\"format\", \"markdown\")\n        erlaubte_tags = ausgabe.get(\"erlaubte_tags\", [])\n        verbotene_tags = ausgabe.get(\"verbotene_tags\", [])\n\n        if output_format == \"body-html\":\n            sections.append(f\"\"\"### Ausgabe-Format: HTML\n- **Nur diese Tags verwenden:** {\", \".join(erlaubte_tags)}\n- **Verboten:** {\", \".join(verbotene_tags)}\n- {ausgabe.get(\"hinweis\", \"Sauberes semantisches HTML\")}\"\"\")\n\n    # Gesamtaufbau\n    aufbau = config.get(\"gesamtaufbau\", {})\n    if aufbau:\n        sections.append(f\"\"\"### Gesamtaufbau:\n- Form: {aufbau.get(\"form\", \"\")}\n- Dramaturgie: {aufbau.get(\"dramaturgie\", \"\")}\n- Linearität: {aufbau.get(\"linearitaet\", \"\")}\n- Themensprünge: {aufbau.get(\"themenspruenge\", \"\")}\"\"\")\n\n    # Einstieg\n    einstieg = config.get(\"einstieg\", {})\n    if einstieg:\n        sections.append(f\"\"\"### Einstieg:\n- Funktion: {einstieg.get(\"funktion\", \"\")}\n- Inhaltstyp: {einstieg.get(\"inhaltstyp\", 
\"\")}\n- Ausschluss: {einstieg.get(\"ausschluss\", \"\")}\"\"\")\n\n    # Hauptteil-Blöcke\n    hauptteil = config.get(\"hauptteil\", {})\n    bloecke = hauptteil.get(\"bloecke\", [])\n    if bloecke:\n        bloecke_text = \"\\n\".join([f\"  {i+1}. {b.get('fokus', '')} → {b.get('ziel', '')}\" for i, b in enumerate(bloecke)])\n        sections.append(f\"\"\"### Hauptteil-Struktur:\n{bloecke_text}\"\"\")\n\n    # Schluss\n    schluss = config.get(\"schluss\", {})\n    if schluss:\n        sections.append(f\"\"\"### Schluss:\n- Typ: {schluss.get(\"typ\", \"\")}\n- Funktion: {schluss.get(\"funktion\", \"\")}\n- Abgrenzung: {schluss.get(\"abgrenzung\", \"\")}\"\"\")\n\n    # Formatierung\n    formatierung = config.get(\"formatierung\", {})\n    if formatierung:\n        ausschluss = formatierung.get(\"ausschluss\", [])\n      ... [TRUNCATED-1a7e7b6180851275]",
        "structuredPatch": [
            {
                "oldStart": 115,
                "oldLines": 6,
                "newStart": 115,
                "newLines": 102,
                "lines": [
                    "     return None",
                    " ",
                    " ",
                    "+def get_semantic_context(chunk_ids):",
                    "+    \"\"\"",
                    "+    Load entities and relations based on chunk_ids.",
                    "+",
                    "+    Uses the chunk_entities junction table to find relevant entities,",
                    "+    then loads relations between those entities.",
                    "+",
                    "+    Args:",
                    "+        chunk_ids: List of chunk IDs from RAG context",
                    "+",
                    "+    Returns:",
                    "+        dict with 'entities' and 'relations' lists",
                    "+    \"\"\"",
                    "+    if not chunk_ids:",
                    "+        return {\"entities\": [], \"relations\": []}",
                    "+",
                    "+    # Filter out None values",
                    "+    chunk_ids = [cid for cid in chunk_ids if cid is not None]",
                    "+    if not chunk_ids:",
                    "+        return {\"entities\": [], \"relations\": []}",
                    "+",
                    "+    placeholders = \", \".join([\"%s\"] * len(chunk_ids))",
                    "+",
                    "+    # Load entities via chunk_entities",
                    "+    cursor = db.execute(",
                    "+        f\"\"\"SELECT DISTINCT e.id, e.name, e.type, e.description,",
                    "+                  AVG(ce.relevance_score) as relevance",
                    "+           FROM chunk_entities ce",
                    "+           JOIN entities e ON ce.entity_id = e.id",
                    "+           WHERE ce.chunk_id IN ({placeholders})",
                    "+           GROUP BY e.id, e.name, e.type, e.description",
                    "+           ORDER BY relevance DESC",
                    "+           LIMIT 10\"\"\",",
                    "+        tuple(chunk_ids),",
                    "+    )",
                    "+    entities = cursor.fetchall()",
                    "+    cursor.close()",
                    "+",
                    "+    if not entities:",
                    "+        return {\"entities\": [], \"relations\": []}",
                    "+",
                    "+    # Get entity IDs for relation lookup",
                    "+    entity_ids = [e[\"id\"] for e in entities]",
                    "+    entity_placeholders = \", \".join([\"%s\"] * len(entity_ids))",
                    "+",
                    "+    # Load relations between found entities",
                    "+    cursor = db.execute(",
                    "+        f\"\"\"SELECT e1.name as source, er.relation_type, e2.name as target",
                    "+           FROM entity_relations er",
                    "+           JOIN entities e1 ON er.source_entity_id = e1.id",
                    "+           JOIN entities e2 ON er.target_entity_id = e2.id",
                    "+           WHERE e1.id IN ({entity_placeholders}) AND e2.id IN ({entity_placeholders})",
                    "+           LIMIT 15\"\"\",",
                    "+        tuple(entity_ids) + tuple(entity_ids),",
                    "+    )",
                    "+    relations = cursor.fetchall()",
                    "+    cursor.close()",
                    "+",
                    "+    return {\"entities\": entities, \"relations\": relations}",
                    "+",
                    "+",
                    "+def get_taxonomy_context(document_ids):",
                    "+    \"\"\"",
                    "+    Load taxonomy terms for documents.",
                    "+",
                    "+    Args:",
                    "+        document_ids: List of document IDs from RAG context",
                    "+",
                    "+    Returns:",
                    "+        List of taxonomy term dicts with name, slug, confidence",
                    "+    \"\"\"",
                    "+    if not document_ids:",
                    "+        return []",
                    "+",
                    "+    # Filter out None values",
                    "+    document_ids = [did for did in document_ids if did is not None]",
                    "+    if not document_ids:",
                    "+        return []",
                    "+",
                    "+    placeholders = \", \".join([\"%s\"] * len(document_ids))",
                    "+",
                    "+    cursor = db.execute(",
                    "+        f\"\"\"SELECT DISTINCT tt.name, tt.slug, MAX(dt.confidence) as confidence",
                    "+           FROM document_taxonomy dt",
                    "+           JOIN taxonomy_terms tt ON dt.taxonomy_term_id = tt.id",
                    "+           WHERE dt.document_id IN ({placeholders})",
                    "+           GROUP BY tt.id, tt.name, tt.slug",
                    "+           ORDER BY confidence DESC\"\"\",",
                    "+        tuple(document_ids),",
                    "+    )",
                    "+    taxonomy = cursor.fetchall()",
                    "+    cursor.close()",
                    "+",
                    "+    return taxonomy",
                    "+",
                    "+",
                    " def get_author_profile(profile_id):",
                    "     \"\"\"Load author profile from database.\"\"\"",
                    "     return get_config_item(profile_id, \"author_profile\")"
                ]
            }
        ],
        "userModified": false,
        "replaceAll": false
    }
}
← Vorheriger Zur Liste Nächster →