Coverage for mindsdb / integrations / libs / llm / config.py: 100%
94 statements
« prev ^ index » next — coverage.py v7.13.1, created at 2026-01-21 00:36 +0000
1from typing import Any, Dict, List, Optional
3from pydantic import BaseModel, ConfigDict, Field
class BaseLLMConfig(BaseModel):
    """Shared base class for all LLM provider configurations.

    Clears pydantic's protected ``model_`` namespace because Langchain
    constructor kwargs (e.g. ``model_name``, ``model_kwargs``) share that
    prefix and would otherwise trigger pydantic warnings/conflicts.
    """

    model_config = ConfigDict(protected_namespaces=())
# See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.openai.ChatOpenAI.html#langchain_community.chat_models.openai.ChatOpenAI
# This config does not have to be exclusively used with Langchain.
class OpenAIConfig(BaseLLMConfig):
    """Configuration for OpenAI chat models.

    Note: in pydantic v2 an ``Optional[...]`` annotation alone does NOT make
    a field optional — it stays required. Explicit ``= None`` defaults are
    needed so these settings can actually be omitted.
    """

    model_name: str
    temperature: Optional[float] = None
    max_retries: Optional[int] = None
    max_tokens: Optional[int] = None
    openai_api_base: Optional[str] = None
    # Inferred from OPENAI_API_KEY if not provided.
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    request_timeout: Optional[float] = None
# See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.anthropic.ChatAnthropic.html
# This config does not have to be exclusively used with Langchain.
class AnthropicConfig(BaseLLMConfig):
    """Configuration for Anthropic (Claude) chat models.

    Explicit ``= None`` defaults keep the optional fields optional under
    pydantic v2, where a bare ``Optional[...]`` annotation is still required.
    """

    model: str
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    default_request_timeout: Optional[float] = None
    # Inferred from ANTHROPIC_API_KEY if not provided.
    anthropic_api_key: Optional[str] = None
    anthropic_api_url: Optional[str] = None
# See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.litellm.ChatLiteLLM.html
# This config does not have to be exclusively used with Langchain.
class LiteLLMConfig(BaseLLMConfig):
    """Configuration for LiteLLM-routed models.

    Explicit ``= None`` defaults keep the optional fields optional under
    pydantic v2, where a bare ``Optional[...]`` annotation is still required.
    """

    model: str
    api_base: Optional[str] = None
    max_retries: Optional[int] = None
    max_tokens: Optional[int] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    temperature: Optional[float] = None
    custom_llm_provider: Optional[str] = None
    # Extra provider-specific kwargs forwarded verbatim to the model call.
    model_kwargs: Optional[Dict[str, Any]] = None
# See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html
# This config does not have to be exclusively used with Langchain.
class OllamaConfig(BaseLLMConfig):
    """Configuration for locally-hosted Ollama models.

    ``base_url`` and ``model`` are required; everything else defaults to
    ``None`` explicitly, since pydantic v2 treats a bare ``Optional[...]``
    annotation as a required field.
    """

    base_url: str
    model: str
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    timeout: Optional[int] = None
    format: Optional[str] = None
    headers: Optional[Dict] = None
    num_predict: Optional[int] = None
    num_ctx: Optional[int] = None
    num_gpu: Optional[int] = None
    repeat_penalty: Optional[float] = None
    stop: Optional[List[str]] = None
    template: Optional[str] = None
class NvidiaNIMConfig(BaseLLMConfig):
    """Configuration for NVIDIA NIM-hosted models.

    Mirrors :class:`OllamaConfig` (minus ``top_k``) plus ``nvidia_api_key``.
    ``base_url`` and ``model`` are required; everything else defaults to
    ``None`` explicitly, since pydantic v2 treats a bare ``Optional[...]``
    annotation as a required field.
    """

    base_url: str
    model: str
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    timeout: Optional[int] = None
    format: Optional[str] = None
    headers: Optional[Dict] = None
    num_predict: Optional[int] = None
    num_ctx: Optional[int] = None
    num_gpu: Optional[int] = None
    repeat_penalty: Optional[float] = None
    stop: Optional[List[str]] = None
    template: Optional[str] = None
    nvidia_api_key: Optional[str] = None
class MindsdbConfig(BaseLLMConfig):
    """Configuration for models served by MindsDB itself.

    Both fields are required: the model to invoke and the MindsDB project
    that owns it.
    """

    model_name: str
    project_name: str
# See https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html
class GoogleConfig(BaseLLMConfig):
    """Configuration for Google Generative AI (Gemini) chat models.

    Only ``model`` is required; all sampling knobs and the API key default
    to ``None`` and fall back to the provider's own defaults.
    """

    model: str = Field(description="Gemini model name to use (e.g., 'gemini-1.5-pro')")
    temperature: Optional[float] = Field(default=None, description="Controls randomness in responses")
    top_p: Optional[float] = Field(default=None, description="Nucleus sampling parameter")
    top_k: Optional[int] = Field(default=None, description="Number of highest probability tokens to consider")
    max_output_tokens: Optional[int] = Field(default=None, description="Maximum number of tokens to generate")
    google_api_key: Optional[str] = Field(default=None, description="API key for Google Generative AI")
# See https://api.python.langchain.com/en/latest/llms/langchain_community.llms.writer.Writer.html
class WriterConfig(BaseLLMConfig):
    """Configuration for Writer (Palmyra) models.

    ``model_name`` carries the ``model_id`` alias. pydantic v2 populates
    aliased fields by alias only by default, which would make
    ``WriterConfig(model_name=...)`` fail; ``populate_by_name=True`` accepts
    both ``model_id=`` (alias) and ``model_name=`` (field name).
    """

    # protected_namespaces is repeated from the base class for explicitness;
    # pydantic v2 merges subclass model_config with the parent's.
    model_config = ConfigDict(protected_namespaces=(), populate_by_name=True)

    model_name: str = Field(default="palmyra-x5", alias="model_id")
    temperature: Optional[float] = Field(default=0.7)
    max_tokens: Optional[int] = Field(default=None)
    top_p: Optional[float] = Field(default=None)
    stop: Optional[List[str]] = Field(default=None)
    best_of: Optional[int] = Field(default=None)
    writer_api_key: Optional[str] = Field(default=None)
    writer_org_id: Optional[str] = Field(default=None)
    base_url: Optional[str] = Field(default=None)
# https://api.python.langchain.com/en/latest/llms/langchain_aws.llms.bedrock.BedrockLLM.html#langchain_aws.llms.bedrock.BedrockLLM
class BedrockConfig(BaseLLMConfig):
    """Configuration for AWS Bedrock-hosted models.

    Only ``model_id`` is required. Credentials may be given explicitly or
    via ``credentials_profile_name``; omitted settings fall back to the AWS
    SDK's usual resolution chain.
    """

    model_id: str
    # AWS credentials / connection settings.
    aws_access_key_id: Optional[str] = Field(default=None)
    aws_secret_access_key: Optional[str] = Field(default=None)
    aws_session_token: Optional[str] = Field(default=None)
    region_name: Optional[str] = Field(default=None)
    credentials_profile_name: Optional[str] = Field(default=None)
    endpoint_url: Optional[str] = Field(default=None)
    # Generation settings.
    stop: Optional[List[str]] = Field(default=None)
    temperature: Optional[float] = Field(default=0.7)
    max_tokens: Optional[int] = Field(default=None)
    # Extra provider-specific kwargs forwarded to the model invocation.
    model_kwargs: Optional[Dict[str, Any]] = Field(default=None)