service

dandy.llm.service.service

LlmService

Bases: BaseService['LlmServiceMixin']

Source code in dandy/core/service/service.py
def __init__(self, obj: Any = None):
    if self.has_obj_service_instance(obj):
        return

    self._obj_type_name: str = str(
        list(self.__class__.__annotations__.values())[0]
    ).split('.')[-1]

    if obj is None:
        return

    self._obj_mro_type_names = [cls.__name__ for cls in obj.__class__.__mro__]

    if self._obj_type_name not in self._obj_mro_type_names:
        message = f'{self.__class__.__name__} was instantiated with obj type "{obj.__class__.__name__}" and failed as it was expecting "{self._obj_type_name}".'
        raise ServiceCriticalException(message)

    self._obj_type: type[TypeAny] = obj.__class__

    if self._obj_type is None or self._obj_type is ...:
        message = f'{self.__class__.__name__} top class attribute must have an annotated type.'
        raise ServiceCriticalException(message)

    self.obj: TypeAny = obj

    if ABC not in self.__class__.__bases__:
        if not self._obj_is_valid:
            message = f'{self._obj_type_name} failed to validate on {self.__class__.__name__}'
            raise ServiceCriticalException(message)

    self.__post_init__()

    if not hasattr(obj, self.generate_service_instance_name(self.__class__)):
        message = f'"{self.__class__.__name__}" can only be attached to an object with a "{self.generate_service_instance_name(self.__class__)}" attribute.'
        raise ServiceCriticalException(message)

    self.set_obj_service_instance(obj, self)
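
The constructor above keys its validation off the first class-level annotation on the service subclass: the annotated type's name must appear in the MRO of the obj passed in. The following is a minimal, self-contained sketch of that check; the class names are hypothetical and not part of dandy.

class Processor:              # hypothetical obj type
    pass

class OtherThing:
    pass

class ToyService:             # stand-in for a BaseService subclass (illustrative only)
    obj: "Processor"          # first annotation -> expected type name "Processor"

    def __init__(self, obj):
        expected = str(list(self.__class__.__annotations__.values())[0]).split('.')[-1]
        mro_names = [cls.__name__ for cls in obj.__class__.__mro__]
        if expected not in mro_names:
            raise TypeError(f'expected "{expected}", got "{obj.__class__.__name__}"')
        self.obj = obj

ToyService(Processor())       # accepted: "Processor" is in the instance's MRO
# ToyService(OtherThing())    # would raise: "OtherThing" does not match "Processor"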

obj instance-attribute

Prompt = Prompt class-attribute instance-attribute

has_retry_attempts_available property

__post_init__

Source code in dandy/llm/service/service.py
def __post_init__(self):
    self._event_id = generate_new_recorder_event_id()

    if isinstance(self.obj.llm_config, str):
        self._llm_config = llm_configs[self.obj.llm_config]
    else:
        self._llm_config = self.obj.llm_config

    self._llm_options = self.obj.llm_config_options

    self._intel = None
    self._intel_json_schema = None

    self._request_body = self._llm_config.generate_request_body(
        max_input_tokens=self._llm_options.max_input_tokens,
        max_output_tokens=self._llm_options.max_output_tokens,
        seed=self._llm_options.seed,
        temperature=self._llm_options.temperature,
    )
    self._response_str = None
    self._retry_max_attempts = 0
    self._retry_attempt = 0
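
__post_init__ accepts the processor's llm_config either as a registered name (a string looked up in llm_configs) or as a config object used directly. The toy below mirrors that branch only; it is not dandy code, and the registry contents are hypothetical.

llm_configs = {'DEFAULT': {'model': 'example-model'}}   # hypothetical registry contents

def resolve_llm_config(llm_config):
    if isinstance(llm_config, str):
        return llm_configs[llm_config]   # registered name -> stored config
    return llm_config                    # config object passed through unchanged

assert resolve_llm_config('DEFAULT') == {'model': 'example-model'}
assert resolve_llm_config({'model': 'other'}) == {'model': 'other'}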

prompt_to_intel

Source code in dandy/llm/service/service.py
def prompt_to_intel(
        self,
        prompt: PromptOrStr,
        intel_class: type[IntelType] | None = None,
        intel_object: IntelType | None = None,
        images: list[str] | None = None,
        image_files: List[str | Path] | None = None,
        include_fields: IncEx | None = None,
        exclude_fields: IncEx | None = None,
        postfix_system_prompt: PromptOrStrOrNone = None,
        message_history: MessageHistory | None = None,
) -> IntelType:

    if intel_class and intel_object:
        message = 'Cannot specify both intel_class and intel_object.'
        raise LlmCriticalException(message)

    if intel_class is None and intel_object is None:
        if self.obj_class.llm_intel_class:
            intel_class = self.obj_class.llm_intel_class
        else:
            raise LlmCriticalException(
                'Must specify either intel_class, intel_object or llm_intel_class on the processor.')

    if image_files:
        images = [] if images is None else images

        for image_file in image_files:
            images.append(encode_file_to_base64(image_file))

    self._intel = intel_class or intel_object

    self._intel_json_schema = IntelFactory.intel_to_json_inc_ex_schema(
        intel=self._intel,
        include=include_fields,
        exclude=exclude_fields
    )

    self._request_body.set_format_to_json_schema(
        self._intel_json_schema
    )

    self._request_body.add_message(
        role='system',
        content=self._generate_system_prompt_str(postfix_system_prompt)
    )

    if message_history:
        for message in message_history.messages:
            self._request_body.add_message(
                role=message.role,
                content=message.content,
                images=message.images
            )

    self._request_body.add_message(
        role='user',
        content=service_user_prompt(
            prompt if isinstance(prompt, Prompt) else Prompt(prompt)
        ).to_str(),
        images=images,
    )

    return self._request_to_intel()
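
A hedged usage sketch for prompt_to_intel: the signature and the rule that exactly one of intel_class or intel_object may be given come from the source above, while the import path, the BaseIntel base class, and the way the service instance is obtained are assumptions not confirmed by this page.

from dandy.intel import BaseIntel          # assumed import path

class SummaryIntel(BaseIntel):             # hypothetical Intel model
    title: str
    bullet_points: list[str]

# "llm_service" stands for an LlmService instance already attached to a processor.
summary = llm_service.prompt_to_intel(
    prompt='Summarize the release notes below into a title and bullet points ...',
    intel_class=SummaryIntel,              # exactly one of intel_class / intel_object
    postfix_system_prompt='Keep the bullet points short.',
)
print(summary.title)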

retry_request_to_intel

Source code in dandy/llm/service/service.py
def retry_request_to_intel(
        self,
        retry_event_description: str,
        retry_user_prompt: PromptOrStr,
) -> IntelType:
    if self.has_retry_attempts_available:
        self._retry_attempt += 1

        recorder_add_llm_retry_event(
            retry_event_description,
            self._event_id,
            remaining_attempts=self._llm_config.options.prompt_retry_count - self._retry_attempt
        )

        self._request_body.add_message(
            role='assistant',
            content=self._response_str
        )

        self._request_body.add_message(
            role='user',
            content=Prompt(retry_user_prompt).to_str()
        )

        return self._request_to_intel()

    else:
        message = f'Failed to get the correct response from the LlmService after {self._llm_config.options.prompt_retry_count} attempts.'
        raise LlmRecoverableException(message)
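
A hedged sketch of driving a retry after validating the previous response: only prompt_to_intel, retry_request_to_intel, has_retry_attempts_available, and the behaviour of exhausting prompt_retry_count come from this page; ColorsIntel, its fields, and the validation rule are illustrative assumptions.

colors = llm_service.prompt_to_intel(
    prompt='List exactly three colors.',
    intel_class=ColorsIntel,               # hypothetical Intel model
)

while len(colors.names) != 3:
    # Appends the previous assistant response plus a corrective user prompt and
    # re-runs the request; raises LlmRecoverableException once the configured
    # prompt_retry_count is exhausted.
    colors = llm_service.retry_request_to_intel(
        retry_event_description='Response did not contain exactly three colors.',
        retry_user_prompt='Please return exactly three color names.',
    )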