{
"tool_response": {
"filePath": "\/var\/www\/migration\/content-pipeline\/src\/generators\/critic.py",
"oldString": " # Format prompt with variables\n prompt = prompt_template.format(fokus=fokus_str, content=content)\n\n response = call_llm(prompt, model, client_name=\"content-studio-critique\")\n\n # Parse JSON from response with robust error handling\n json_match = re.search(r\"\\{[\\s\\S]*\\}\", response)\n if json_match:\n json_str = json_match.group()\n try:\n feedback = json.loads(json_str)\n feedback[\"critic_name\"] = critic[\"name\"]\n return feedback\n except json.JSONDecodeError:\n # Try to repair common JSON issues\n repaired = repair_json(json_str)\n try:\n feedback = json.loads(repaired)\n feedback[\"critic_name\"] = critic[\"name\"]\n return feedback\n except json.JSONDecodeError:\n pass\n\n return {\n \"critic_name\": critic[\"name\"],\n \"rating\": 5,\n \"passed\": False,\n \"issues\": [\"Konnte Feedback nicht parsen\"],\n \"suggestions\": [],\n \"summary\": response[:500],\n }",
"newString": " # Format prompt with variables\n prompt = prompt_template.format(fokus=fokus_str, content=content)\n\n response = call_llm(prompt, model, client_name=\"content-studio-critique\")\n\n # Parse JSON from response using DRY utility\n feedback = parse_critic_response(response)\n feedback[\"critic_name\"] = critic[\"name\"]\n\n # If parsing failed completely, add raw response summary\n if not feedback.get(\"summary\") and response:\n feedback[\"summary\"] = response[:500]\n\n return feedback",
"originalFile": "\"\"\"\nCritic Functions - Content critique and revision.\n\nSOLID: Uses json_utils for DRY JSON parsing, enums for type safety.\n\"\"\"\n\nimport json\nimport os\nimport sys\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom db import db\nfrom enums import ContentOrderStatus\nfrom json_utils import parse_critic_response\n\nfrom .config_loader import get_prompt\nfrom .content_generator import call_llm\nfrom .format_checker import check_formatting\nfrom .persistence import save_version, update_order_status\n\n\ndef get_critic(critic_id: int) -> dict | None:\n \"\"\"Load critic from content_config table.\"\"\"\n cursor = db.execute(\n \"\"\"SELECT cc.id, cc.name, cc.content, cc.prompt_id, cc.sort_order,\n p.content as prompt_content\n FROM content_config cc\n LEFT JOIN prompts p ON cc.prompt_id = p.id\n WHERE cc.id = %s AND cc.type = 'critic' AND cc.status = 'active'\"\"\",\n (critic_id,),\n )\n result = cursor.fetchone()\n cursor.close()\n\n if result:\n # Extract fokus from content JSON\n content = json.loads(result[\"content\"]) if isinstance(result[\"content\"], str) else result[\"content\"]\n result[\"fokus\"] = content.get(\"fokus\", [])\n\n return result\n\n\ndef run_critic(\n content: str,\n critic_id: int,\n model: str = \"anthropic\",\n structure_config: dict | None = None,\n profile_config: dict | None = None,\n) -> dict:\n \"\"\"\n Run a single critic on content.\n\n Args:\n content: The text content to critique\n critic_id: ID of the critic in content_config\n model: LLM model to use (ignored for Formatierungsprüfer)\n structure_config: Optional structure config for format rules\n profile_config: Optional author profile for format rules\n\n Returns:\n dict with feedback and rating\n \"\"\"\n db.connect()\n\n try:\n critic = get_critic(critic_id)\n if not critic:\n return {\"error\": f\"Critic {critic_id} not found\"}\n\n # Formatierungsprüfer: Use deterministic checker instead of LLM\n if critic[\"name\"] == \"Formatierungsprüfer\" or critic_id == 33:\n result = check_formatting(content, structure_config, profile_config)\n return {\n \"critic_name\": \"Formatierungsprüfer\",\n \"rating\": result[\"score\"],\n \"score\": result[\"score\"],\n \"passed\": result[\"passed\"],\n \"issues\": result[\"issues\"],\n \"suggestions\": [\"Formatierung korrigieren\"] if not result[\"passed\"] else [],\n \"summary\": result[\"summary\"],\n \"deterministic\": True, # Flag to indicate non-LLM check\n }\n\n fokus = json.loads(critic[\"fokus\"]) if isinstance(critic[\"fokus\"], str) else critic[\"fokus\"]\n fokus_str = \", \".join(fokus)\n\n # Load prompt from database (via critic.prompt_id or fallback to generic)\n prompt_template = critic.get(\"prompt_content\")\n if not prompt_template:\n prompt_template = get_prompt(\"critic-generic\")\n if not prompt_template:\n # Ultimate fallback - should never happen if DB is properly set up\n prompt_template = \"\"\"Du bist ein kritischer Lektor mit dem Fokus auf: {fokus}\n\nAnalysiere den folgenden Text und gib strukturiertes Feedback:\n\n## Text:\n{content}\n\n## Deine Aufgabe:\n1. Prüfe den Text auf die Aspekte: {fokus}\n2. Identifiziere konkrete Verbesserungspunkte\n3. 
Bewerte die Qualität (1-10)\n\nAntworte im JSON-Format:\n{{\n \"rating\": 8,\n \"passed\": true,\n \"issues\": [\"Issue 1\", \"Issue 2\"],\n \"suggestions\": [\"Suggestion 1\"],\n \"summary\": \"Kurze Zusammenfassung\"\n}}\"\"\"\n\n # Format prompt with variables\n prompt = prompt_template.format(fokus=fokus_str, content=content)\n\n response = call_llm(prompt, model, client_name=\"content-studio-critique\")\n\n # Parse JSON from response with robust error handling\n json_match = re.search(r\"\\{[\\s\\S]*\\}\", response)\n if json_match:\n json_str = json_match.group()\n try:\n feedback = json.loads(json_str)\n feedback[\"critic_name\"] = critic[\"name\"]\n return feedback\n except json.JSONDecodeError:\n # Try to repair common JSON issues\n repaired = repair_json(json_str)\n try:\n feedback = json.loads(repaired)\n feedback[\"critic_name\"] = critic[\"name\"]\n return feedback\n except json.JSONDecodeError:\n pass\n\n return {\n \"critic_name\": critic[\"name\"],\n \"rating\": 5,\n \"passed\": False,\n \"issues\": [\"Konnte Feedback nicht parsen\"],\n \"suggestions\": [],\n \"summary\": response[:500],\n }\n\n except Exception as e:\n return {\"error\": str(e)}\n finally:\n db.disconnect()\n\n\ndef run_critique_round(version_id: int, model: str = \"anthropic\") -> dict:\n \"\"\"\n Run all active critics on a content version.\n\n Returns:\n dict with all critique results\n \"\"\"\n db.connect()\n\n try:\n # Get version content and order settings (including selected_critics)\n cursor = db.execute(\n \"\"\"SELECT cv.*, co.id as order_id, co.current_critique_round,\n co.selected_critics, co.quality_check\n FROM content_versions cv\n JOIN content_orders co ON cv.order_id = co.id\n WHERE cv.id = %s\"\"\",\n (version_id,),\n )\n version = cursor.fetchone()\n cursor.close()\n\n if not version:\n return {\"error\": \"Version not found\"}\n\n # Check if quality_check is enabled\n if not version.get(\"quality_check\", False):\n return {\"success\": True, \"skipped\": True, \"message\": \"Qualitätsprüfung deaktiviert\"}\n\n content_data = json.loads(version[\"content\"]) if isinstance(version[\"content\"], str) else version[\"content\"]\n content_text = content_data.get(\"text\", \"\")\n\n # Parse selected_critics from order (JSON array of IDs)\n selected_critics_raw = version.get(\"selected_critics\")\n if selected_critics_raw:\n if isinstance(selected_critics_raw, str):\n selected_critic_ids = json.loads(selected_critics_raw)\n else:\n selected_critic_ids = selected_critics_raw\n else:\n selected_critic_ids = []\n\n # Get critics - filter by selected_critics if specified\n if selected_critic_ids:\n # Only use selected critics\n placeholders = \", \".join([\"%s\"] * len(selected_critic_ids))\n sql = (\n \"SELECT id, name FROM content_config \"\n f\"WHERE type = 'critic' AND status = 'active' AND id IN ({placeholders}) \"\n \"ORDER BY sort_order\"\n )\n cursor = db.execute(sql, tuple(selected_critic_ids))\n else:\n # Fallback: use all active critics if none selected\n sql = \"SELECT id, name FROM content_config WHERE type = 'critic' AND status = 'active' ORDER BY sort_order\"\n cursor = db.execute(sql)\n critics = cursor.fetchall()\n cursor.close()\n\n # Increment critique round\n new_round = (version[\"current_critique_round\"] or 0) + 1\n cursor = db.execute(\n \"UPDATE content_orders SET current_critique_round = %s WHERE id = %s\", (new_round, version[\"order_id\"])\n )\n db.commit()\n cursor.close()\n\n # Run each critic\n results = []\n all_passed = True\n\n for critic in critics:\n 
db.disconnect() # Disconnect before calling run_critic\n feedback = run_critic(content_text, critic[\"id\"], model)\n db.connect() # Reconnect\n\n if \"error\" not in feedback:\n # Save critique\n cursor = db.execute(\n \"\"\"INSERT INTO content_critiques (version_id, critic_id, round, feedback)\n VALUES (%s, %s, %s, %s)\"\"\",\n (version_id, critic[\"id\"], new_round, json.dumps(feedback)),\n )\n db.commit()\n cursor.close()\n\n if not feedback.get(\"passed\", True):\n all_passed = False\n\n results.append(feedback)\n\n # Update order status based on results\n if all_passed:\n update_order_status(version[\"order_id\"], \"validate\")\n else:\n update_order_status(version[\"order_id\"], \"revision\")\n\n return {\"success\": True, \"round\": new_round, \"critiques\": results, \"all_passed\": all_passed}\n\n except Exception as e:\n return {\"error\": str(e)}\n finally:\n db.disconnect()\n\n\ndef revise_content(version_id: int, model: str = \"anthropic\") -> dict:\n \"\"\"\n Create a revision based on critique feedback.\n\n Returns:\n dict with new version info\n \"\"\"\n db.connect()\n\n try:\n # Get version and critiques\n cursor = db.execute(\n \"\"\"SELECT cv.*, co.id as order_id, co.briefing, co.current_critique_round,\n ap.content as profile_config,\n cs.content as structure_config\n FROM content_versions cv\n JOIN content_orders co ON cv.order_id = co.id\n LEFT JOIN content_config ap ON co.author_profile_id = ap.id AND ap.type = 'author_profile'\n LEFT JOIN content_config cs ON co.structure_id = cs.id AND cs.type = 'structure'\n WHERE cv.id = %s\"\"\",\n (version_id,),\n )\n version = cursor.fetchone()\n cursor.close()\n\n if not version:\n return {\"error\": \"Version not found\"}\n\n content_data = json.loads(version[\"content\"]) if isinstance(version[\"content\"], str) else version[\"content\"]\n content_text = content_data.get(\"text\", \"\")\n\n # Get latest critiques (critics now in con... [TRUNCATED-3e66f0b54b0a0e04]",
"structuredPatch": [
{
"oldStart": 117,
"oldLines": 33,
"newStart": 117,
"newLines": 16,
"lines": [
" ",
" response = call_llm(prompt, model, client_name=\"content-studio-critique\")",
" ",
"- # Parse JSON from response with robust error handling",
"- json_match = re.search(r\"\\{[\\s\\S]*\\}\", response)",
"- if json_match:",
"- json_str = json_match.group()",
"- try:",
"- feedback = json.loads(json_str)",
"- feedback[\"critic_name\"] = critic[\"name\"]",
"- return feedback",
"- except json.JSONDecodeError:",
"- # Try to repair common JSON issues",
"- repaired = repair_json(json_str)",
"- try:",
"- feedback = json.loads(repaired)",
"- feedback[\"critic_name\"] = critic[\"name\"]",
"- return feedback",
"- except json.JSONDecodeError:",
"- pass",
"+ # Parse JSON from response using DRY utility",
"+ feedback = parse_critic_response(response)",
"+ feedback[\"critic_name\"] = critic[\"name\"]",
" ",
"- return {",
"- \"critic_name\": critic[\"name\"],",
"- \"rating\": 5,",
"- \"passed\": False,",
"- \"issues\": [\"Konnte Feedback nicht parsen\"],",
"- \"suggestions\": [],",
"- \"summary\": response[:500],",
"- }",
"+ # If parsing failed completely, add raw response summary",
"+ if not feedback.get(\"summary\") and response:",
"+ feedback[\"summary\"] = response[:500]",
" ",
"+ return feedback",
"+",
" except Exception as e:",
" return {\"error\": str(e)}",
" finally:"
]
}
],
"userModified": false,
"replaceAll": false
}
}
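Note on the edit above: the removed inline parser referenced `re` and `repair_json`, neither of which appears in the file's import block, so the old repair path would have raised a `NameError` at runtime; delegating to `json_utils.parse_critic_response` also sidesteps that. The utility itself is not shown in this response. Below is a minimal sketch of what it might look like, reconstructed from the removed inline logic; the use of the `json_repair` package and the exact fallback shape are assumptions, and the real utility may differ.

```python
# Hypothetical sketch of json_utils.parse_critic_response, inferred from
# the inline logic this edit removes. Not the actual project code.
import json
import re

from json_repair import repair_json  # assumed repair helper


def parse_critic_response(response: str) -> dict:
    """Extract the first JSON object from an LLM response.

    Tries a strict parse, then a repaired parse, then falls back to a
    neutral "could not parse" result so callers always receive the
    expected feedback shape. The caller sets critic_name and may fill
    an empty summary from the raw response.
    """
    json_match = re.search(r"\{[\s\S]*\}", response or "")
    if json_match:
        json_str = json_match.group()
        for candidate in (json_str, repair_json(json_str)):
            try:
                return json.loads(candidate)
            except json.JSONDecodeError:
                continue
    # Fallback mirrors the defaults the removed code returned inline;
    # summary stays empty so run_critic can substitute response[:500].
    return {
        "rating": 5,
        "passed": False,
        "issues": ["Konnte Feedback nicht parsen"],
        "suggestions": [],
        "summary": "",
    }
```

Leaving `summary` empty in the fallback and letting `run_critic` fill it from the raw response, as the newString does, preserves the old behavior of surfacing the first 500 characters of an unparseable reply while keeping the parsing utility caller-agnostic.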