Verified Commit 93e24a49 authored by Richard Glosner's avatar Richard Glosner
Browse files

Add on-start connectivity check for LLM communication

parent e2e6efaf
Loading
Loading
Loading
Loading
+25 −1
Original line number Diff line number Diff line
import threading

from django.apps import AppConfig
from django.conf import settings


def test_llm_connection():
    """Send a single test prompt to the configured LLM client and log the outcome.

    Meant to run in a background thread right after startup; it never raises —
    any failure is reported through the logger only.
    """
    from common_lib.logger import logger
    from . import llm_classes as client_module

    ping_messages = [{"role": "user", "content": "pinging, respond with pong"}]
    try:
        client_module.LLM_CLIENT.query_llm(ping_messages)
        logger.info("LLM connection test successful.")
    except Exception as exc:
        logger.error(f"LLM connection test failed: {exc}")


class LlmConfig(AppConfig):
@@ -6,15 +22,23 @@ class LlmConfig(AppConfig):
    name = "llm"

    def ready(self):
        """Initialize the module-level LLM client once Django apps are loaded.

        Idempotent: returns early if the client already exists, since
        ``ready()`` can be invoked more than once (e.g. under autoreload or
        in tests). When no model is configured, initialization is skipped
        with an informational log. After creating the client, connectivity
        is verified in a background thread so startup is not blocked.
        """
        from common_lib.logger import logger
        from . import llm_classes as client_module

        # Client already initialized — ready() may run multiple times.
        if client_module.LLM_CLIENT is not None:
            return

        # `settings` comes from the top-of-file import; no local re-import needed.
        if settings.LLM_MODEL == "":
            logger.info(
                "LLM model not configured, LLM client will not be initialized."
            )
            return

        # TODO: implement switch for OpenAI child class and possibly other
        client_module.LLM_CLIENT = client_module.OllamaLLM(
            model=settings.LLM_MODEL,
            llm_url=settings.LLM_URL,
            timeout=settings.LLM_TIMEOUT,
        )

        # Daemon thread: the connectivity probe must not keep the process
        # alive on shutdown if the LLM endpoint hangs.
        threading.Thread(target=test_llm_connection, daemon=True).start()