Protokoll #15189

ID: 15189
Zeitstempel: 2025-12-25 15:38:35.153740
Client: root
IP: 145.224.72.140
Modell: claude-sonnet-4-20250514
Status: completed
Tokens: 1,973 (Input: 984, Output: 989)
Dauer: 439 ms
Request-Zeit: 2025-12-25 15:38:35.153740
Response-Zeit: 2025-12-25 15:38:35.592840

Request

{
    "event": "PreToolUse",
    "tool_name": "Write",
    "tool_input": {
        "file_path": "\/var\/www\/mcp-servers\/mcp-tasks\/tools\/task_tools\/execute_tool.py",
        "content": "\"\"\"Task execution tool - Ollama integration.\"\"\"\nimport json\nimport time\nimport requests\n\nimport sys\nsys.path.insert(0, \"\/var\/www\/mcp-servers\/mcp-tasks\")\n\nfrom config import Config\nfrom domain.contracts import TaskResult, ExecutorType\nfrom .base import get_repo, get_task_logger, log_tool_call\n\n\ndef register_execute_tools(mcp):\n    \"\"\"Register task execution tools.\"\"\"\n\n    repo = get_repo()\n    logger = get_task_logger()\n\n    @mcp.tool()\n    def tasks_execute(\n        id: int,\n        model: str = \"mistral\",\n        auto_complete: bool = False,\n    ) -> dict:\n        \"\"\"\n        Führt einen Task direkt mit lokalem Ollama aus.\n\n        Args:\n            id: Task-ID (required)\n            model: Ollama-Modell (default: mistral)\n            auto_complete: Task nach Erfolg abschließen\n\n        Returns:\n            Ausführungsergebnis mit Response, Tokens, Dauer\n        \"\"\"\n        start = time.time()\n        request_str = json.dumps({\"id\": id, \"model\": model})\n\n        try:\n            task = repo.find_by_id(id)\n            if not task:\n                log_tool_call(logger, \"tasks_execute\", request_str, \"denied\", task_id=id, error_message=\"Task not found\")\n                return {\"success\": False, \"error\": f\"Task {id} not found\"}\n\n            prompt = f\"Task: {task.title}\\n\\n\"\n            if task.description:\n                prompt += f\"Description:\\n{task.description}\\n\\n\"\n            prompt += \"Please complete this task and provide a detailed response.\"\n\n            try:\n                resp = requests.post(\n                    f\"{Config.OLLAMA_HOST}\/api\/generate\",\n                    json={\"model\": model, \"prompt\": prompt, \"stream\": False},\n                    timeout=Config.OLLAMA_TIMEOUT,\n                )\n                resp.raise_for_status()\n                ollama_result = resp.json()\n            except Exception as e:\n                log_tool_call(logger, \"tasks_execute\", request_str, \"error\", task_id=id, error_message=f\"Ollama error: {e}\")\n                return {\"success\": False, \"error\": f\"Ollama error: {e}\"}\n\n            duration_ms = int((time.time() - start) * 1000)\n\n            tokens_input = ollama_result.get(\"prompt_eval_count\", len(prompt) \/\/ 4)\n            tokens_output = ollama_result.get(\"eval_count\", len(ollama_result.get(\"response\", \"\")) \/\/ 4)\n\n            result = TaskResult(\n                task_id=id,\n                executor=\"mcp-tasks\",\n                executor_type=ExecutorType.OLLAMA,\n                model_name=model,\n                request=prompt[:2000],\n                response=ollama_result.get(\"response\", \"\"),\n                status=\"success\",\n                tokens_input=tokens_input,\n                tokens_output=tokens_output,\n                duration_ms=duration_ms,\n            )\n            result_id = repo.create_result(result)\n            result.id = result_id\n\n            if auto_complete:\n                repo.update(id, {\"status\": \"completed\"})\n\n            log_tool_call(logger, \"tasks_execute\", f\"model={model}, tokens={tokens_input + tokens_output}\", \"success\", duration_ms, task_id=id)\n\n            return {\n                \"success\": True,\n                \"result\": result.to_dict(),\n                \"task_completed\": auto_complete,\n                \"message\": f\"Task #{id} executed with {model}\",\n            }\n\n        except Exception as e:\n            duration = int((time.time() - start) * 1000)\n            log_tool_call(logger, \"tasks_execute\", request_str, \"error\", duration, task_id=id, error_message=str(e))\n            return {\"success\": False, \"error\": str(e)}\n"
    }
}

Response

{
    "tool_response": {
        "type": "create",
        "filePath": "\/var\/www\/mcp-servers\/mcp-tasks\/tools\/task_tools\/execute_tool.py",
        "content": "\"\"\"Task execution tool - Ollama integration.\"\"\"\nimport json\nimport time\nimport requests\n\nimport sys\nsys.path.insert(0, \"\/var\/www\/mcp-servers\/mcp-tasks\")\n\nfrom config import Config\nfrom domain.contracts import TaskResult, ExecutorType\nfrom .base import get_repo, get_task_logger, log_tool_call\n\n\ndef register_execute_tools(mcp):\n    \"\"\"Register task execution tools.\"\"\"\n\n    repo = get_repo()\n    logger = get_task_logger()\n\n    @mcp.tool()\n    def tasks_execute(\n        id: int,\n        model: str = \"mistral\",\n        auto_complete: bool = False,\n    ) -> dict:\n        \"\"\"\n        Führt einen Task direkt mit lokalem Ollama aus.\n\n        Args:\n            id: Task-ID (required)\n            model: Ollama-Modell (default: mistral)\n            auto_complete: Task nach Erfolg abschließen\n\n        Returns:\n            Ausführungsergebnis mit Response, Tokens, Dauer\n        \"\"\"\n        start = time.time()\n        request_str = json.dumps({\"id\": id, \"model\": model})\n\n        try:\n            task = repo.find_by_id(id)\n            if not task:\n                log_tool_call(logger, \"tasks_execute\", request_str, \"denied\", task_id=id, error_message=\"Task not found\")\n                return {\"success\": False, \"error\": f\"Task {id} not found\"}\n\n            prompt = f\"Task: {task.title}\\n\\n\"\n            if task.description:\n                prompt += f\"Description:\\n{task.description}\\n\\n\"\n            prompt += \"Please complete this task and provide a detailed response.\"\n\n            try:\n                resp = requests.post(\n                    f\"{Config.OLLAMA_HOST}\/api\/generate\",\n                    json={\"model\": model, \"prompt\": prompt, \"stream\": False},\n                    timeout=Config.OLLAMA_TIMEOUT,\n                )\n                resp.raise_for_status()\n                ollama_result = resp.json()\n            except Exception as e:\n                log_tool_call(logger, \"tasks_execute\", request_str, \"error\", task_id=id, error_message=f\"Ollama error: {e}\")\n                return {\"success\": False, \"error\": f\"Ollama error: {e}\"}\n\n            duration_ms = int((time.time() - start) * 1000)\n\n            tokens_input = ollama_result.get(\"prompt_eval_count\", len(prompt) \/\/ 4)\n            tokens_output = ollama_result.get(\"eval_count\", len(ollama_result.get(\"response\", \"\")) \/\/ 4)\n\n            result = TaskResult(\n                task_id=id,\n                executor=\"mcp-tasks\",\n                executor_type=ExecutorType.OLLAMA,\n                model_name=model,\n                request=prompt[:2000],\n                response=ollama_result.get(\"response\", \"\"),\n                status=\"success\",\n                tokens_input=tokens_input,\n                tokens_output=tokens_output,\n                duration_ms=duration_ms,\n            )\n            result_id = repo.create_result(result)\n            result.id = result_id\n\n            if auto_complete:\n                repo.update(id, {\"status\": \"completed\"})\n\n            log_tool_call(logger, \"tasks_execute\", f\"model={model}, tokens={tokens_input + tokens_output}\", \"success\", duration_ms, task_id=id)\n\n            return {\n                \"success\": True,\n                \"result\": result.to_dict(),\n                \"task_completed\": auto_complete,\n                \"message\": f\"Task #{id} executed with {model}\",\n            }\n\n        except Exception as e:\n            duration = int((time.time() - start) * 1000)\n            log_tool_call(logger, \"tasks_execute\", request_str, \"error\", duration, task_id=id, error_message=str(e))\n            return {\"success\": False, \"error\": str(e)}\n",
        "structuredPatch": [],
        "originalFile": null
    }
}
← Vorheriger Zur Liste Nächster →