ollama

dandy.llm.config.ollama

OllamaLlmConfig

Bases: BaseLlmConfig

Source code in dandy/llm/config/config.py
def __init__(
        self,
        host: str,
        port: int,
        model: str,
        path_parameters: Union[List[str], None] = None,
        query_parameters: Union[dict, None] = None,
        headers: Union[dict, None] = None,
        api_key: Union[str, None] = None,
        seed: Union[int, None] = None,
        randomize_seed: Union[bool, None] = None,
        max_input_tokens: Union[int, None] = None,
        max_output_tokens: Union[int, None] = None,
        temperature: Union[float, None] = None,
        prompt_retry_count: Union[int, None] = None,
):

    self.http_config = HttpConnectorConfig(
        url=Url(
            host=host,
            port=port,
            path_parameters=path_parameters,
            query_parameters=query_parameters,
        ),
        headers=headers,
        basic_auth=api_key,
    )
    self.model = model

    self.options = LlmConfigOptions(
        prompt_retry_count=prompt_retry_count,
        max_input_tokens=max_input_tokens,
        max_output_tokens=max_output_tokens,
        seed=seed,
        randomize_seed=randomize_seed,
        temperature=temperature,
    )

    self.register_settings(
        'host',
        'port',
        'model',
        'path_parameters',
        'query_parameters',
        'headers',
        'api_key',
        'seed',
        'randomize_seed',
        'max_input_tokens',
        'max_output_tokens',
        'temperature',
        'prompt_retry_count',
    )

    self.__llm_config_post_init__()
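
The constructor only requires host, port, and model; every other argument is optional. Below is a minimal, hedged sketch of building a config, assuming a local Ollama server on its default port 11434; the model name is purely illustrative.

from dandy.llm.config.ollama import OllamaLlmConfig

llm_config = OllamaLlmConfig(
    host='localhost',
    port=11434,              # Ollama's default port
    model='llama3.1',        # illustrative model name
    temperature=0.2,         # optional sampling override
    max_output_tokens=1024,  # optional cap on generated tokens
)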

__llm_config_post_init__

Source code in dandy/llm/config/ollama.py
def __llm_config_post_init__(self):
    self.http_config.url.path_parameters = [
        'api',
        'chat',
    ]
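
Because __llm_config_post_init__ always resets the path parameters to ['api', 'chat'], requests target Ollama's chat endpoint regardless of any path_parameters passed to __init__. A hedged sketch of the resulting URL, assuming a plain-HTTP local server:

host, port = 'localhost', 11434      # assumed local Ollama defaults
path_parameters = ['api', 'chat']    # forced by __llm_config_post_init__
endpoint = f"http://{host}:{port}/" + "/".join(path_parameters)
print(endpoint)  # http://localhost:11434/api/chat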

generate_request_body

Source code in dandy/llm/config/ollama.py
def generate_request_body(
        self,
        max_input_tokens: Union[int, None] = None,
        max_output_tokens: Union[int, None] = None,
        seed: Union[int, None] = None,
        temperature: Union[float, None] = None,
) -> BaseRequestBody:

    return OllamaRequestBody(
        model=self.model,
        options=OllamaRequestOptions(
            num_ctx=self.options.max_input_tokens if max_input_tokens is None else max_input_tokens,
            num_predict=self.options.max_output_tokens if max_output_tokens is None else max_output_tokens,
            seed=self.options.seed if seed is None else seed,
            temperature=self.options.temperature if temperature is None else temperature
        )
    )
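
Arguments passed to generate_request_body take precedence; anything left as None falls back to the value stored on self.options. A sketch of that override behaviour, assuming the llm_config instance from the earlier example and that OllamaRequestBody exposes its fields as plain attributes:

body = llm_config.generate_request_body(temperature=0.9)

print(body.model)                # 'llama3.1'
print(body.options.temperature)  # 0.9, the call-level override
print(body.options.num_ctx)      # falls back to the config's max_input_tokens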

get_response_content

Source code in dandy/llm/config/ollama.py
def get_response_content(self, response) -> str:
    return response['message']['content']
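
get_response_content pulls the reply text out of Ollama's /api/chat response, which nests it under message.content. A hedged example with a hand-written payload in that shape:

response = {
    'model': 'llama3.1',
    'message': {'role': 'assistant', 'content': 'Hello there!'},
    'done': True,
}
print(llm_config.get_response_content(response))  # 'Hello there!'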