dandy.llm.request.request

LlmRequestBody

Bases: BaseModel

Instance attributes:

    model

Class / instance attributes:

    messages = Field(default_factory=MessageHistory)
    stream = False
    response_format = {'type': 'json_schema', 'json_schema': {'name': 'response_data', 'strict': True, 'schema': ...}}
    model_config = ConfigDict(extra='allow')

Properties:

    estimated_token_count
    json_schema (writable)
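
A minimal construction sketch, assuming ordinary Pydantic keyword construction; the model name is illustrative, not a documented value:

from dandy.llm.request.request import LlmRequestBody

# Assumption: fields are passed as keyword arguments, as with any Pydantic
# BaseModel; 'llama3.1' is an illustrative model name.
body = LlmRequestBody(model='llama3.1')

# 'messages', 'stream' and 'response_format' fall back to the defaults
# listed above; ConfigDict(extra='allow') keeps any extra keyword arguments.
print(body.stream)  # False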

model_dump

Source code in dandy/llm/request/request.py
def model_dump(self, *args, **kwargs) -> dict:
    model_dict = super().model_dump(*args, exclude_none=True, **kwargs)
    model_dict['messages'] = model_dict.pop('messages')['messages']

    return model_dict
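
The override unwraps the nested MessageHistory dump ({'messages': [...]}) so that 'messages' maps directly to a list. A hedged sketch of the resulting shape, with illustrative values and continuing the body sketch above:

# Before the override, super().model_dump() nests the history:
#   {'model': 'llama3.1', 'stream': False,
#    'messages': {'messages': [{'role': 'user', 'content': [...]}]}, ...}
#
# After the override, 'messages' is the inner list:
#   {'model': 'llama3.1', 'stream': False,
#    'messages': [{'role': 'user', 'content': [...]}], ...}
dumped = body.model_dump()
assert isinstance(dumped['messages'], list)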

reset_messages

Source code in dandy/llm/request/request.py
def reset_messages(self):
    self.messages = MessageHistory()
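
A small usage sketch, continuing the example above; resetting swaps in a fresh, empty MessageHistory:

body.reset_messages()
# body.messages is now an empty MessageHistory()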

to_dict

Source code in dandy/llm/request/request.py
def to_dict(self) -> dict:
    model_dict = self.model_dump()

    formatted_messages = []

    # Flatten each message's content parts into one role/content entry per
    # part: text parts keep their text, image_url parts keep only the
    # base64 payload of the data URL.
    for message in model_dict['messages']:
        for content in message['content']:
            if content['type'] == 'text':
                formatted_messages.append(
                    {
                        'role': message['role'],
                        'content': content['text'],
                    }
                )
            elif content['type'] == 'image_url':
                formatted_messages.append(
                    {
                        'role': message['role'],
                        'content': content['image_url']['url'].split(';base64,')[1],
                    }
                )

    model_dict['messages'] = formatted_messages

    return model_dict
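
The sketch below restates the flattening performed above: text parts keep their text, image_url parts are reduced to the base64 payload after ';base64,'. The input message structure is inferred from the loop and the values are illustrative:

# Illustrative dumped message (structure inferred from the loop above):
#   {'role': 'user',
#    'content': [
#        {'type': 'text', 'text': 'Describe this image.'},
#        {'type': 'image_url',
#         'image_url': {'url': 'data:image/png;base64,iVBORw0KGgo...'}},
#    ]}
#
# to_dict() produces one flat entry per content part:
#   [{'role': 'user', 'content': 'Describe this image.'},
#    {'role': 'user', 'content': 'iVBORw0KGgo...'}]
request_dict = body.to_dict()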