dandy.llm.connector

LlmConnector

Bases: BaseConnector

Source code in dandy/llm/connector.py
def __init__(
    self,
    recorder_event_id: str,
    llm_config: LlmConfig,
    intel_class: type[IntelType] | None,
    system_prompt: Prompt | str,
    diligence: float = 1.0,
):
    self.recorder_event_id = recorder_event_id

    self.llm_config = llm_config

    self.intel = None
    self.intel_class = intel_class

    self.prompt_retry_attempt = 0

    self.request_body = self.llm_config.generate_request_body()
    self.response_str = None

    self.system_prompt_str = str(system_prompt)

    self.diligence = diligence

recorder_event_id = recorder_event_id (instance attribute)

llm_config = llm_config (instance attribute)

intel = None (instance attribute)

intel_class = intel_class (instance attribute)

prompt_retry_attempt = 0 (instance attribute)

request_body = self.llm_config.generate_request_body() (instance attribute)

response_str = None (instance attribute)

system_prompt_str = str(system_prompt) (instance attribute)

diligence = diligence (instance attribute)

has_retry_attempts_available (property)
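
A minimal construction sketch follows. The Intel subclass, its base class and fields, and the pre-built llm_config are assumptions for illustration and are not taken from this page:

from dandy.llm.connector import LlmConnector

# GreetingIntel stands in for any Intel subclass; its base class and
# fields are hypothetical here.
class GreetingIntel(Intel):
    text: str

connector = LlmConnector(
    recorder_event_id='event-123',                 # any recorder event id
    llm_config=llm_config,                         # an LlmConfig built elsewhere
    intel_class=GreetingIntel,
    system_prompt='You are a concise assistant.',  # Prompt or plain string
    diligence=1.0,                                 # default diligence level
)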

prompt_to_intel

Source code in dandy/llm/connector.py
def prompt_to_intel(
    self,
    prompt: Prompt | str | None = None,
    intel_class: type[IntelType] | None = None,
    intel_object: IntelType | None = None,
    audio_urls: list[str] | None = None,
    audio_file_paths: list[str | Path] | None = None,
    audio_base64_strings: list[str] | None = None,
    image_urls: list[str] | None = None,
    image_file_paths: list[str | Path] | None = None,
    image_base64_strings: list[str] | None = None,
    include_fields: IncEx | None = None,
    exclude_fields: IncEx | None = None,
    message_history: MessageHistory | None = None,
    replace_message_history: bool = False,
) -> IntelType:
    self._update_request_body()

    self._set_intel(intel_class=intel_class, intel_object=intel_object)

    self.request_body.json_schema = IntelFactory.intel_to_json_inc_ex_schema(
        intel=self.intel, include=include_fields, exclude=exclude_fields
    )

    if not self.request_body.messages.has_system_message:
        self._prepend_system_message()

    if message_history:
        if replace_message_history:
            self.request_body.messages = message_history
        else:
            self.request_body.messages.extend(message_history.messages)

    if prompt is not None:
        self.request_body.messages.add_message(role='user', text=Prompt(prompt).to_str())

    if audio_urls or audio_file_paths or audio_base64_strings:
        self.request_body.messages.add_message(
            role='user',
            audio_urls=audio_urls,
            audio_file_paths=audio_file_paths,
            audio_base64_strings=audio_base64_strings,
        )

    if image_urls or image_file_paths or image_base64_strings:
        self.request_body.messages.add_message(
            role='user',
            image_urls=image_urls,
            image_file_paths=image_file_paths,
            image_base64_strings=image_base64_strings,
        )

    if len(self.request_body.messages) <= 1:
        message = (
            'You cannot prompt the LlmService without at least one system and one user message.'
        )
        raise LlmCriticalError(message)

    if self._changed_diligence:
        PreDiligenceHandler(level=self.diligence).apply(llm_connector=self)

    response_intel_object = self._request_to_intel()

    if self._changed_diligence:
        # Reuse a single handler instance so the requires_new_llm_request
        # check reflects the state set by apply().
        post_diligence_handler = PostDiligenceHandler(level=self.diligence)
        post_diligence_handler.apply(llm_connector=self)

        if post_diligence_handler.requires_new_llm_request:
            response_intel_object = self._request_to_intel()

    return response_intel_object
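
A hedged usage sketch, reusing the connector from the construction example above; the prompt text and file path are illustrative only:

# Text-only prompt; returns an instance of the configured intel_class.
intel = connector.prompt_to_intel(prompt='Write a one-line greeting.')

# The same call accepts multimodal inputs (the path is hypothetical):
intel = connector.prompt_to_intel(
    prompt='Describe this image.',
    image_file_paths=['/tmp/photo.png'],
)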

reset

Source code in dandy/llm/connector.py
def reset(self):
    self.llm_config.reset()
    self.request_body = self.llm_config.generate_request_body()
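
Because reset() regenerates the request body from the config, a connector can be reused across unrelated prompts. A sketch, continuing the example above:

connector.prompt_to_intel(prompt='First question.')
connector.reset()  # fresh request body; accumulated messages are discarded
connector.prompt_to_intel(prompt='An unrelated second question.')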

retry_request_to_intel

Source code in dandy/llm/connector.py
def retry_request_to_intel(
    self, retry_event_description: str, retry_user_prompt: Prompt | str
) -> IntelType:
    if self.has_retry_attempts_available:
        self.prompt_retry_attempt += 1

        recorder_add_llm_retry_event(
            retry_event_description,
            self.recorder_event_id,
            remaining_attempts=self.llm_config.options.prompt_retry_count
            - self.prompt_retry_attempt,
        )

        self.request_body.messages.add_message(
            role='user', text=Prompt(retry_user_prompt).to_str()
        )

        return self._request_to_intel()

    message = f'Failed to get the correct response from the LlmService after {self.llm_config.options.prompt_retry_count} attempts.'
    raise LlmRecoverableError(message)
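
A sketch of a caller-side retry loop; looks_valid is a hypothetical validation predicate, and the loop ends when either validation passes or the connector raises LlmRecoverableError after prompt_retry_count attempts are exhausted:

intel = connector.prompt_to_intel(prompt='Return valid JSON only.')

while not looks_valid(intel):  # looks_valid is a hypothetical check
    # Raises LlmRecoverableError once retry attempts are exhausted.
    intel = connector.retry_request_to_intel(
        retry_event_description='response failed validation',
        retry_user_prompt='Your last response was invalid, please try again.',
    )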