execute_tool.py

Code Hygiene Score: 85

No issues found.

Dependencies: 11

Functions: 1

Code

"""Task execution tool - Ollama integration."""
import json
import time
import requests

import sys
sys.path.insert(0, "/var/www/mcp-servers/mcp_tasks")
sys.path.insert(0, "/var/www/mcp-servers/shared")

from constants import MS_PER_SECOND

from config import Config
from domain.contracts import TaskResult, ExecutorType
from .base import get_repo, get_task_logger, log_tool_call


def register_execute_tools(mcp):
    """Register task execution tools on the given MCP server instance."""

    repo = get_repo()
    logger = get_task_logger()

    @mcp.tool()
    def tasks_execute(
        id: int,  # NOTE: shadows the builtin, but the name is part of the tool schema
        model: str = "mistral",
        auto_complete: bool = False,
    ) -> dict:
        """
        Execute a task directly with local Ollama.

        Args:
            id: Task ID (required)
            model: Ollama model (default: mistral)
            auto_complete: Mark the task as completed after a successful run

        Returns:
            Execution result with response, token counts and duration,
            or {"success": False, "error": ...} on any failure.
        """
        start = time.time()
        request_str = json.dumps({"id": id, "model": model})

        try:
            task = repo.find_by_id(id)
            if not task:
                log_tool_call(logger, "tasks_execute", request_str, "denied", task_id=id, error_message="Task not found")
                return {"success": False, "error": f"Task {id} not found"}

            # Build the prompt from the task's title plus optional description.
            prompt = f"Task: {task.title}\n\n"
            if task.description:
                prompt += f"Description:\n{task.description}\n\n"
            prompt += "Please complete this task and provide a detailed response."

            try:
                resp = requests.post(
                    f"{Config.OLLAMA_HOST}/api/generate",
                    json={"model": model, "prompt": prompt, "stream": False},
                    timeout=Config.OLLAMA_TIMEOUT,
                )
                resp.raise_for_status()
                ollama_result = resp.json()
            # Narrowed from a bare `except Exception`: RequestException covers
            # connection, timeout and HTTP-status errors from requests;
            # ValueError covers a non-JSON response body (json.JSONDecodeError
            # subclasses ValueError). Any other exception is a programming
            # error and falls through to the outer boundary handler below.
            except (requests.RequestException, ValueError) as e:
                log_tool_call(logger, "tasks_execute", request_str, "error", task_id=id, error_message=f"Ollama error: {e}")
                return {"success": False, "error": f"Ollama error: {e}"}

            duration_ms = int((time.time() - start) * MS_PER_SECOND)

            # Ollama normally reports token counts; fall back to a rough
            # chars/4 estimate when the fields are missing from the response.
            tokens_input = ollama_result.get("prompt_eval_count", len(prompt) // 4)
            tokens_output = ollama_result.get("eval_count", len(ollama_result.get("response", "")) // 4)

            result = TaskResult(
                task_id=id,
                executor="mcp-tasks",
                executor_type=ExecutorType.OLLAMA,
                model_name=model,
                request=prompt[:2000],  # stored request is truncated to 2000 chars
                response=ollama_result.get("response", ""),
                status="success",
                tokens_input=tokens_input,
                tokens_output=tokens_output,
                duration_ms=duration_ms,
            )
            result_id = repo.create_result(result)
            result.id = result_id

            if auto_complete:
                repo.update(id, {"status": "completed"})

            log_tool_call(logger, "tasks_execute", f"model={model}, tokens={tokens_input + tokens_output}", "success", duration_ms, task_id=id)

            return {
                "success": True,
                "result": result.to_dict(),
                "task_completed": auto_complete,
                "message": f"Task #{id} executed with {model}",
            }

        except Exception as e:
            # Top-level boundary: log and report any unexpected failure to the
            # caller as an error dict instead of crashing the tool call.
            duration = int((time.time() - start) * MS_PER_SECOND)
            log_tool_call(logger, "tasks_execute", request_str, "error", duration, task_id=id, error_message=str(e))
            return {"success": False, "error": str(e)}
← Overview