protokoll.py

Code Hygiene Score: 99

Issues 1

Line Type Description
116 magic_number Magic number found: 1000

Dependencies 4

Classes 2

Code

"""
KI-Protokoll Integration for Pipeline
Logs all LLM calls to ki_dev.protokoll for tracking and cost analysis.
"""

import time
from datetime import datetime
from typing import Optional

import mysql.connector

from config import DB_PROTOKOLL_CONFIG


class Protokoll:
    """Logger for LLM calls to ki_dev.protokoll."""

    def __init__(self, client_name="pipeline"):
        self.client_name = client_name
        self.conn = None

    def connect(self):
        """Connect to protokoll database."""
        if not self.conn or not self.conn.is_connected():
            self.conn = mysql.connector.connect(**DB_PROTOKOLL_CONFIG)

    def disconnect(self):
        """Disconnect from database."""
        if self.conn and self.conn.is_connected():
            self.conn.close()
            self.conn = None

    def log_llm_call(
        self,
        request: str,
        response: Optional[str] = None,
        model_name: Optional[str] = None,
        tokens_input: int = 0,
        tokens_output: int = 0,
        duration_ms: int = 0,
        status: str = "completed",
        error_message: Optional[str] = None,
    ):
        """
        Log an LLM call to ki_dev.protokoll.

        Args:
            request: The prompt/request sent to the LLM
            response: The LLM response (truncated if too long)
            model_name: Name of the model used
            tokens_input: Input token count
            tokens_output: Output token count
            duration_ms: Duration in milliseconds
            status: 'pending', 'completed', or 'error'
            error_message: Error message if status is 'error'
        """
        self.connect()

        try:
            cursor = self.conn.cursor()
            now = datetime.now()

            # Truncate response if too long (TEXT limit ~65KB)
            if response and len(response) > 60000:
                response = response[:60000] + "...[truncated]"

            cursor.execute(
                """INSERT INTO protokoll
                   (request_ip, client_name, request, request_timestamp,
                    response, response_timestamp, duration_ms,
                    tokens_input, tokens_output, tokens_total,
                    model_name, status, error_message)
                   VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                (
                    "127.0.0.1",  # Pipeline runs locally
                    self.client_name,
                    request[:10000] if request else "",  # Truncate request too
                    now,
                    response,
                    now if response else None,
                    duration_ms,
                    tokens_input,
                    tokens_output,
                    tokens_input + tokens_output,
                    model_name,
                    status,
                    error_message,
                ),
            )
            self.conn.commit()
            last_id = cursor.lastrowid  # read before close(); close() resets it
            cursor.close()
            return last_id

        except Exception as e:
            print(f"Protokoll logging failed: {e}")
            return None
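
# Illustrative sketch, not part of protokoll.py: a direct call on a Protokoll
# instance could look like the following (model name and token counts are
# placeholder values):
#
#   logger = Protokoll("pipeline")
#   row_id = logger.log_llm_call(
#       request="Classify this support ticket ...",
#       response="category: billing",
#       model_name="gpt-4o-mini",  # placeholder model name
#       tokens_input=250,
#       tokens_output=12,
#       duration_ms=840,
#   )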


class LLMCallContext:
    """Context manager for timing and logging LLM calls."""

    def __init__(self, protokoll: Protokoll, request: str, model_name: str):
        self.protokoll = protokoll
        self.request = request
        self.model_name = model_name
        self.start_time = None
        self.response = None
        self.tokens_input = 0
        self.tokens_output = 0
        self.error = None

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        duration_ms = int((time.time() - self.start_time) * 1000)  # seconds -> milliseconds

        if exc_type is not None:
            self.protokoll.log_llm_call(
                request=self.request,
                response=None,
                model_name=self.model_name,
                duration_ms=duration_ms,
                status="error",
                error_message=str(exc_val),
            )
        else:
            self.protokoll.log_llm_call(
                request=self.request,
                response=self.response,
                model_name=self.model_name,
                tokens_input=self.tokens_input,
                tokens_output=self.tokens_output,
                duration_ms=duration_ms,
                status="completed",
            )

        return False  # Don't suppress exceptions

    def set_response(self, response: str, tokens_input: int = 0, tokens_output: int = 0):
        """Set response data for logging."""
        self.response = response
        self.tokens_input = tokens_input
        self.tokens_output = tokens_output


# Global instance for pipeline
protokoll = Protokoll("pipeline")
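
The sketch below shows how LLMCallContext is intended to wrap an LLM request. It is illustrative only and not part of protokoll.py: call_llm() and its return values are hypothetical stand-ins for the pipeline's real client, and the model name is a placeholder.

# usage_example.py -- illustrative sketch, assumes a hypothetical call_llm() client
from protokoll import protokoll, LLMCallContext


def call_llm(prompt: str, model: str):
    """Hypothetical LLM client; replace with the pipeline's real client."""
    raise NotImplementedError


def run_prompt(prompt: str) -> str:
    # LLMCallContext times the call; on an exception __exit__ logs an 'error'
    # row, otherwise it logs whatever was passed to set_response().
    with LLMCallContext(protokoll, request=prompt, model_name="gpt-4o-mini") as ctx:
        text, tokens_in, tokens_out = call_llm(prompt, model="gpt-4o-mini")
        ctx.set_response(text, tokens_input=tokens_in, tokens_output=tokens_out)
        return text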