Skip to content

service

dandy.llm.service

LlmService

Bases: BaseService['dandy.llm.mixin.LlmServiceMixin'], DecoderServiceMixin

Source code in dandy/core/service/service.py
def __init__(self, obj: T_co) -> None:
    """Attach *obj* to this service and tag the instance with a fresh recorder event id.

    Finishes by invoking ``__post_init__`` so subclasses can complete their setup
    once ``self.obj`` and ``self.recorder_event_id`` are in place.
    """
    self.obj = obj
    self.recorder_event_id = generate_recorder_event_id()
    self.__post_init__()

messages property

options property

__post_init__

Source code in dandy/llm/service.py
def __post_init__(self):
    """Build the LlmConnector that this service delegates all prompting to.

    The connector is configured from the wrapped ``self.obj``: its diligence,
    its rendered system prompt, its LLM config, and its intel class.
    """
    source = self.obj
    rendered_system_prompt = service_system_prompt(
        role=source.role,
        task=source.task,
        guidelines=source.guidelines,
        system_override_prompt=source.system_override_prompt,
    ).to_str()
    self._llm_connector: LlmConnector = LlmConnector(
        recorder_event_id=self.recorder_event_id,
        diligence=source.diligence,
        system_prompt=rendered_system_prompt,
        llm_config=source.get_llm_config(),
        intel_class=source.intel_class,
    )

prompt_to_intel

Source code in dandy/llm/service.py
def prompt_to_intel(
        self,
        prompt: Prompt | str | None = None,
        intel_class: type[IntelType] | None = None,
        intel_object: IntelType | None = None,
        audio_urls: list[str] | None = None,
        audio_file_paths: list[str | Path] | None = None,
        audio_base64_strings: list[str] | None = None,
        image_urls: list[str] | None = None,
        image_file_paths: list[str | Path] | None = None,
        image_base64_strings: list[str] | None = None,
        include_fields: IncEx | None = None,
        exclude_fields: IncEx | None = None,
        message_history: MessageHistory | None = None,
        replace_message_history: bool = False,
) -> IntelType:
    """Forward a prompt (plus any media and history options) to the connector.

    Every argument is passed through unchanged to
    ``self._llm_connector.prompt_to_intel``; the connector's intel result is
    returned as-is.
    """
    forwarded_kwargs = dict(
        prompt=prompt,
        intel_class=intel_class,
        intel_object=intel_object,
        audio_urls=audio_urls,
        audio_file_paths=audio_file_paths,
        audio_base64_strings=audio_base64_strings,
        image_urls=image_urls,
        image_file_paths=image_file_paths,
        image_base64_strings=image_base64_strings,
        include_fields=include_fields,
        exclude_fields=exclude_fields,
        message_history=message_history,
        replace_message_history=replace_message_history,
    )
    return self._llm_connector.prompt_to_intel(**forwarded_kwargs)

prompt_to_intel_future

Source code in dandy/llm/service.py
def prompt_to_intel_future(self, **kwargs) -> AsyncFuture:
    """Schedule ``prompt_to_intel`` to run in the background.

    All keyword arguments are handed through untouched; the returned
    ``AsyncFuture`` resolves to the same intel result a direct call would give.
    """
    bound_prompt_call = self.prompt_to_intel
    return process_to_future(bound_prompt_call, **kwargs)

reset

Source code in dandy/llm/service.py
def reset(self):
    """Reset the connector first, then clear the accumulated messages."""
    connector = self._llm_connector
    connector.reset()
    self.reset_messages()

reset_messages

Source code in dandy/llm/service.py
def reset_messages(self):
    """Drop every message queued on the connector's request body."""
    request_body = self._llm_connector.request_body
    request_body.reset_messages()