Coverage for mindsdb / interfaces / agents / constants.py: 75%
40 statements
« prev ^ index » next — coverage.py v7.13.1, created at 2026-01-21 00:36 +0000
1import os
3from types import MappingProxyType
# the same as
# from mindsdb.integrations.handlers.openai_handler.constants import CHAT_MODELS
# OpenAI chat-completion model names the agent layer accepts for the
# "openai" provider (duplicated here to avoid importing the handler).
OPEN_AI_CHAT_MODELS = (
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-instruct",
    "gpt-4",
    "gpt-4-32k",
    "gpt-4-1106-preview",
    "gpt-4-0125-preview",
    "gpt-4.1",
    "gpt-4.1-mini",
    "gpt-4o",
    "o4-mini",
    "o3-mini",
    "o1-mini",
)
# LLM providers an agent may be configured with.
# NOTE(review): "litellm" and "vllm" have no entry in PROVIDER_TO_MODELS below —
# presumably model-name validation is skipped for those providers; confirm.
SUPPORTED_PROVIDERS = {
    "openai",
    "anthropic",
    "litellm",
    "ollama",
    "nvidia_nim",
    "vllm",
    "google",
    "writer",
}
# Chat models
# Anthropic chat model names accepted for the "anthropic" provider.
ANTHROPIC_CHAT_MODELS = (
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
    "claude-2.1",
    "claude-2.0",
    "claude-instant-1.2",
)
# Ollama model names accepted for the "ollama" provider (local models).
OLLAMA_CHAT_MODELS = (
    "gemma",
    "llama2",
    "mistral",
    "mixtral",
    "llava",
    "neural-chat",
    "codellama",
    "dolphin-mixtral",
    "qwen",
    "llama2-uncensored",
    "mistral-openorca",
    "deepseek-coder",
    "nous-hermes2",
    "phi",
    "orca-mini",
    "dolphin-mistral",
    "wizard-vicuna-uncensored",
    "vicuna",
    "tinydolphin",
    "llama2-chinese",
    "openhermes",
    "zephyr",
    "nomic-embed-text",
    "tinyllama",
    "openchat",
    "wizardcoder",
    "phind-codellama",
    "starcoder",
    "yi",
    "orca2",
    "falcon",
    "starcoder2",
    "wizard-math",
    "dolphin-phi",
    "nous-hermes",
    "starling-lm",
    "stable-code",
    "medllama2",
    "bakllava",
    "codeup",
    "wizardlm-uncensored",
    "solar",
    "everythinglm",
    "sqlcoder",
    "nous-hermes2-mixtral",
    "stable-beluga",
    "yarn-mistral",
    "samantha-mistral",
    "stablelm2",
    "meditron",
    "stablelm-zephyr",
    "magicoder",
    "yarn-llama2",
    "wizard-vicuna",
    "llama-pro",
    "deepseek-llm",
    "codebooga",
    "mistrallite",
    "dolphincoder",
    "nexusraven",
    "open-orca-platypus2",
    "all-minilm",
    "goliath",
    "notux",
    "alfred",
    "megadolphin",
    "xwinlm",
    "wizardlm",
    "duckdb-nsql",
    "notus",
)
# NVIDIA NIM model identifiers ("org/model" form) accepted for "nvidia_nim".
NVIDIA_NIM_CHAT_MODELS = (
    "microsoft/phi-3-mini-4k-instruct",
    "mistralai/mistral-7b-instruct-v0.2",
    "writer/palmyra-med-70b",
    "mistralai/mistral-large",
    "mistralai/codestral-22b-instruct-v0.1",
    "nvidia/llama3-chatqa-1.5-70b",
    "upstage/solar-10.7b-instruct",
    "google/gemma-2-9b-it",
    "adept/fuyu-8b",
    "google/gemma-2b",
    "databricks/dbrx-instruct",
    "meta/llama-3_1-8b-instruct",
    "microsoft/phi-3-medium-128k-instruct",
    "01-ai/yi-large",
    "nvidia/neva-22b",
    "meta/llama-3_1-70b-instruct",
    "google/codegemma-7b",
    "google/recurrentgemma-2b",
    "google/gemma-2-27b-it",
    "deepseek-ai/deepseek-coder-6.7b-instruct",
    "mediatek/breeze-7b-instruct",
    "microsoft/kosmos-2",
    "microsoft/phi-3-mini-128k-instruct",
    "nvidia/llama3-chatqa-1.5-8b",
    "writer/palmyra-med-70b-32k",
    "google/deplot",
    "meta/llama-3_1-405b-instruct",
    "aisingapore/sea-lion-7b-instruct",
    "liuhaotian/llava-v1.6-mistral-7b",
    "microsoft/phi-3-small-8k-instruct",
    "meta/codellama-70b",
    "liuhaotian/llava-v1.6-34b",
    "nv-mistralai/mistral-nemo-12b-instruct",
    "microsoft/phi-3-medium-4k-instruct",
    "seallms/seallm-7b-v2.5",
    "mistralai/mixtral-8x7b-instruct-v0.1",
    "mistralai/mistral-7b-instruct-v0.3",
    "google/paligemma",
    "google/gemma-7b",
    "mistralai/mixtral-8x22b-instruct-v0.1",
    "google/codegemma-1.1-7b",
    "nvidia/nemotron-4-340b-instruct",
    "meta/llama3-70b-instruct",
    "microsoft/phi-3-small-128k-instruct",
    "ibm/granite-8b-code-instruct",
    "meta/llama3-8b-instruct",
    "snowflake/arctic",
    "microsoft/phi-3-vision-128k-instruct",
    "meta/llama2-70b",
    "ibm/granite-34b-code-instruct",
)
# Google Gemini model names accepted for the "google" provider.
GOOGLE_GEMINI_CHAT_MODELS = (
    "gemini-2.5-pro",
    "gemini-2.5-flash",
    "gemini-2.5-pro-preview-03-25",
    "gemini-2.0-flash",
    "gemini-2.0-flash-lite",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-1.5-pro",
)
# Writer model names accepted for the "writer" provider.
WRITER_CHAT_MODELS = ("palmyra-x5", "palmyra-x4")

# Define a read-only dictionary mapping providers to their models
# (MappingProxyType makes the mapping immutable at runtime).
# NOTE(review): "litellm" and "vllm" from SUPPORTED_PROVIDERS have no entry
# here — confirm lookups handle the missing keys.
PROVIDER_TO_MODELS = MappingProxyType(
    {
        "anthropic": ANTHROPIC_CHAT_MODELS,
        "ollama": OLLAMA_CHAT_MODELS,
        "openai": OPEN_AI_CHAT_MODELS,
        "nvidia_nim": NVIDIA_NIM_CHAT_MODELS,
        "google": GOOGLE_GEMINI_CHAT_MODELS,
        "writer": WRITER_CHAT_MODELS,
    }
)
# Column names used for agent question/answer result sets.
ASSISTANT_COLUMN = "answer"
CONTEXT_COLUMN = "context"
TRACE_ID_COLUMN = "trace_id"
# Maximum seconds an agent invocation may run before timing out.
DEFAULT_AGENT_TIMEOUT_SECONDS = 300
# These should require no additional arguments.
# NOTE(review): mutable module-level list — callers should copy it rather than
# append to the shared default; confirm no caller mutates it in place.
DEFAULT_AGENT_TOOLS = []
def get_default_agent_type():
    """Return the default langchain agent type for MindsDB agents.

    The import is deferred to call time so that the optional langchain
    dependency is only required when an agent is actually used.

    Returns:
        AgentType.CONVERSATIONAL_REACT_DESCRIPTION

    Raises:
        ImportError: if langchain is not installed.
    """
    try:
        from langchain.agents import AgentType

        return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
    except ImportError as e:
        # Chain the original exception (PEP 3134) so the underlying import
        # failure stays visible in the traceback instead of being replaced.
        raise ImportError("langchain is required for agent type. Install with: pip install mindsdb[agent]") from e
# Maximum tool-use iterations per agent run before giving up.
DEFAULT_MAX_ITERATIONS = 10
# NOTE(review): 8096 looks like a typo for 8192 (or 4096) — confirm intent.
DEFAULT_MAX_TOKENS = 8096
# Default LLM used when an agent does not specify one.
DEFAULT_MODEL_NAME = "gpt-4o"
DEFAULT_TEMPERATURE = 0.0
# Column name holding the user's question in agent queries.
USER_COLUMN = "question"
DEFAULT_EMBEDDINGS_MODEL_PROVIDER = "openai"
def get_default_embeddings_model_class():
    """Return the default embeddings model class (OpenAIEmbeddings).

    Imported lazily so the optional langchain_openai dependency is only
    required when embeddings are actually used.

    Returns:
        The OpenAIEmbeddings class (not an instance).

    Raises:
        ImportError: if langchain_openai is not installed.
    """
    try:
        from langchain_openai import OpenAIEmbeddings

        return OpenAIEmbeddings
    except ImportError as e:
        # Chain the original exception (PEP 3134) so the underlying import
        # failure stays visible in the traceback instead of being replaced.
        raise ImportError("langchain_openai is required for embeddings. Install with: pip install mindsdb[agent]") from e
# Maximum rows per knowledge-base insert batch (overridable via env var).
MAX_INSERT_BATCH_SIZE = int(os.getenv("KB_MAX_INSERT_BATCH_SIZE", 50_000))
# Model name used for tiktoken token counting (overridable via env var).
DEFAULT_TIKTOKEN_MODEL_NAME = os.getenv("DEFAULT_TIKTOKEN_MODEL_NAME", "gpt-4")
# Polling interval in seconds between checks for new streamed agent chunks.
# Bug fix: os.getenv returns a str whenever the variable is set, while the
# default is the float 1.0 — cast to float so the type is always consistent
# (mirrors the int() cast used for MAX_INSERT_BATCH_SIZE above).
AGENT_CHUNK_POLLING_INTERVAL_SECONDS = float(os.getenv("AGENT_CHUNK_POLLING_INTERVAL_SECONDS", 1.0))
# Default database targeted by text-to-SQL queries.
DEFAULT_TEXT2SQL_DATABASE = "mindsdb"
# System prompt used when an agent has no custom prompt configured.
# Directs the LLM to use conversation memory, the kb_* knowledge-base tools,
# and sql_db_query for database questions.
# NOTE(review): blank lines and bullet indentation reconstructed from a
# coverage-report extraction — verify against the original file.
DEFAULT_AGENT_SYSTEM_PROMPT = """
You are an AI assistant powered by MindsDB. You have access to conversation history and should use it to provide contextual responses. When answering questions, follow these guidelines:

**CONVERSATION CONTEXT:**
- You have access to previous messages in this conversation through your memory system
- When users ask about previous questions, topics, or context, refer to the conversation history
- Maintain conversational continuity and reference earlier parts of the conversation when relevant
- When asked to retrieve or list past user questions, examine your conversation memory to identify and list previous user queries
- You can reference specific past questions by their content or by their position in the conversation (e.g., "your first question", "the question you asked earlier about...")

1. For factual questions about specific topics, use the knowledge base tools in this sequence:
   - First use kb_list_tool to see available knowledge bases
   - Then use kb_info_tool to understand the structure of relevant knowledge bases
   - Finally use kb_query_tool to query the knowledge base for specific information

2. For questions about database tables and their contents:
   - Use the sql_db_query to query the tables directly
   - You can join tables if needed to get comprehensive information
   - You are running on a federated query engine, so joins across multiple databases are allowed and supported
   - **Important Rule for SQL Queries:** If you formulate an SQL query as part of answering a user's question, you *must* then use the `sql_db_query` tool to execute that query and get its results. The SQL query string itself is NOT the final answer to the user unless the user has specifically asked for the query. Your final AI response should be based on the *results* obtained from executing the query.


For factual questions, ALWAYS use the available tools to look up information rather than relying on your internal knowledge.

"""
# Prefix for the langchain conversational (ReAct) agent prompt; the tool list
# is appended after the trailing "following tools:" line by the prompt builder.
# NOTE(review): blank lines and bullet indentation reconstructed from a
# coverage-report extraction — verify against the original file.
MINDSDB_PREFIX = """You are an AI assistant powered by MindsDB. You have access to conversation history and should use it to provide contextual responses. When answering questions, follow these guidelines:

**CONVERSATION CONTEXT:**
- You have access to previous messages in this conversation through your memory system
- When users ask about previous questions, topics, or context, refer to the conversation history
- Maintain conversational continuity and reference earlier parts of the conversation when relevant
- When asked to retrieve or list past user questions, examine your conversation memory to identify and list previous user queries
- You can reference specific past questions by their content or by their position in the conversation (e.g., "your first question", "the question you asked earlier about...")

1. For questions about database tables and their contents:
   - Use the sql_db_query to query the tables directly
   - You can join tables if needed to get comprehensive information
   - You are running on a federated query engine, so joins across multiple databases are allowed and supported
   - **Important Rule for SQL Queries:** If you formulate an SQL query as part of answering a user's question, you *must* then use the `sql_db_query` tool to execute that query and get its results. The SQL query string itself is NOT the final answer to the user unless the user has specifically asked for the query. Your final AI response should be based on the *results* obtained from executing the query.

2. For factual questions about specific topics, use the knowledge base tools, if available, in this sequence:
- First use kb_list_tool to see available knowledge bases
- Then use kb_info_tool to understand the structure of relevant knowledge bases
- Finally use kb_query_tool to query the knowledge base for specific information

For factual questions, ALWAYS use the available tools to look up information rather than relying on your internal knowledge.

TOOLS:
------

Assistant has access to the following tools:"""
# ReAct-style tool-calling format instructions. The {tool_names} and
# {ai_prefix} braces are presumably filled in by the langchain prompt
# template at runtime — do not escape or rename them.
# NOTE(review): blank lines reconstructed from a coverage-report extraction —
# verify against the original file.
EXPLICIT_FORMAT_INSTRUCTIONS = """
<< TOOL CALLING INSTRUCTIONS >>

**It is critical you use the following format to call a tool**

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""