Coverage for mindsdb / integrations / handlers / leonardoai_handler / leonardo_ai_handler.py: 0%
105 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-01-21 00:36 +0000
1import os
2import requests
3import time
4from typing import Dict, Optional
6import pandas as pd
8from mindsdb.integrations.libs.base import BaseMLEngine
9from mindsdb.integrations.libs.llm.utils import get_completed_prompts
10from mindsdb.utilities import log
11from mindsdb.interfaces.storage.model_fs import HandlerStorage
12from mindsdb.utilities.config import Config
14logger = log.getLogger(__name__)
16LEONARDO_API_BASE = 'https://cloud.leonardo.ai/api/rest/v1'
class LeonardoAIHandler(BaseMLEngine):
    """
    This integration seamlessly combines MindsDB and Leonardo AI to create a powerful
    AI-driven solution for creative content generation.

    Content Generation with Leonardo AI: Harness the power of advanced generative
    models for creative content production. From realistic images to artistic text, Leonardo
    AI opens up new possibilities for content creators.
    """

    name = "leonardo_ai"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cache of model ids available to the current API key (filled by create()).
        self.all_models = []
        # Model id used when the user does not specify one in the USING clause.
        self.default_model = '6bef9f1b-29cb-40c7-b9df-32b51c1f67d3'
        self.base_api = LEONARDO_API_BASE
        self.rate_limit = 50
        self.max_batch_size = 5  # default value

    def create(self, target: str, args=None, **kwargs):
        """
        Validate the API key and the requested model id, then persist the model args.

        Args:
            target: name of the target column; stored into the persisted args.
            args: statement arguments; must contain a "using" clause.

        Raises:
            Exception: when the USING clause is missing, the API key is rejected
                by the /me endpoint, or an unknown model id is supplied.
        """
        args['target'] = target

        if "using" not in args:
            raise Exception("Leonardo Engine requires a USING clause!")

        self.model_storage.json_set("args", args)
        api_key = self._get_leonardo_api_key(args, self.engine_storage)  # fetch api key

        # Probe the /me endpoint to confirm the API key is valid.
        self.connection = requests.get(
            f"{self.base_api}/me",
            headers={
                "accept": "application/json",
                "authorization": f"Bearer {api_key}"
            }
        )

        if self.connection.status_code != 200:
            raise Exception("Unable to make connection, please verify the API key.")

        # Key is valid -- validate (or default) the requested model id.
        available_models = self._get_platform_model(args)
        # BUG FIX: the original indexed args['using']['model'] directly, which
        # raised KeyError when no model was given, making the default-model
        # fallback unreachable. Use .get() so the fallback actually runs.
        if not args['using'].get('model'):
            args['using']['model'] = self.default_model
        elif args['using']['model'] not in available_models:
            # An explicit model id was given but is not available to this key.
            raise Exception("Invalid Model ID. Please use a valid Model")

    def predict(self, df: pd.DataFrame, args: Optional[Dict] = None, **kwargs) -> pd.DataFrame:
        """
        Build one prompt per row of *df* and generate images for them.

        Args:
            df: input rows; the template's placeholders are filled from its columns.
            args: optional runtime arguments; 'predict_params' may override
                'prompt_template'.

        Returns:
            pd.DataFrame with a 'url' column of generated image URLs.
        """
        pred_args = args['predict_params'] if args else {}
        args = self.model_storage.json_get("args")

        prompt_template = pred_args.get(
            'prompt_template',
            args.get('prompt_template', 'Generate a picture of {{{{text}}}}')
        )

        # Fill the template from the dataframe columns.
        prompts, _empty_prompt_ids = get_completed_prompts(prompt_template, df)
        df['__mdb_prompt'] = prompts

        # predict_answer already returns a DataFrame with a 'url' column;
        # no need to re-wrap it in pd.DataFrame().
        return self.predict_answer(prompts)

    def _get_leonardo_api_key(self, args, engine_storage: HandlerStorage, strict=True):
        """
        API_KEY preference order:
            1. provided at model creation
            2. provided at engine creation
            3. LEONARDO_API_KEY env variable
            4. leonardo.api_key setting in config.json

        Returns:
            The API key string, or None when strict=False and no key is found.

        Raises:
            Exception: when strict=True and no key is found anywhere.
        """
        # 1
        if "api_key" in args["using"]:
            return args["using"]["api_key"]
        # 2
        connection_args = engine_storage.get_connection_args()
        if "api_key" in connection_args:
            return connection_args["api_key"]
        # 3
        api_key = os.getenv("LEONARDO_API_KEY")
        if api_key is not None:
            return api_key
        # 4
        config = Config()
        leonardo_cfg = config.get("leonardo", {})
        if "api_key" in leonardo_cfg:
            return leonardo_cfg["api_key"]

        if strict:
            # BUG FIX: the original used a backslash line continuation inside
            # the string literal, embedding a run of raw indentation spaces
            # into the user-facing message. Use implicit concatenation.
            raise Exception(
                'Missing API key "api_key". Either re-create this ML_ENGINE '
                'specifying the `api_key` parameter, or re-create this model '
                'and pass the API key with `USING` syntax.'
            )

    def _get_platform_model(self, args):
        """
        Return the list of custom model ids available to the configured API key.
        """
        args = self.model_storage.json_get('args')
        api_key = self._get_leonardo_api_key(args, self.engine_storage)

        self.connection = requests.get(
            f"{self.base_api}/platformModels",
            headers={
                "accept": "application/json",
                "authorization": f"Bearer {api_key}"
            }
        )

        models = self.connection.json()

        # Harden against an unexpected response shape: a missing
        # 'custom_models' key yields an empty list instead of a KeyError.
        return [model['id'] for model in models.get('custom_models', [])]

    def predict_answer(self, prompts, **kwargs):
        """
        Generate pictures for the given prompts and return their URLs.

        Request flow:
            1. POST the prompt to /generations -> returns a generationId.
            2. Wait for the asynchronous generation job to finish.
            3. GET /generations/<generationId> -> extract the image URLs.

        Returns:
            pd.DataFrame with a single 'url' column, one row per generated image.
        """
        args = self.model_storage.json_get('args')
        using = args['using']
        height = using.get('height', 512)
        width = using.get('width', 512)
        api_key = self._get_leonardo_api_key(args, self.engine_storage)  # fetch API key

        generation_url = f"{self.base_api}/generations"

        post_headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {api_key}"
        }

        get_headers = {
            "accept": "application/json",
            "authorization": f"Bearer {api_key}"
        }

        generation_payload = {
            "height": height,
            "modelId": using['model'],
            "prompt": f"{prompts}",
            "width": width,
        }

        # Kick off the (asynchronous) image generation job.
        response_generation = requests.post(generation_url, json=generation_payload, headers=post_headers)
        generation_data = response_generation.json()
        generation_id = generation_data['sdGenerationJob']['generationId']

        # Give the job time to complete before fetching results.
        # NOTE(review): a fixed sleep is fragile -- polling the job status
        # would be more robust, but the status field contract is not visible
        # here; confirm against the Leonardo API docs before changing.
        time.sleep(15)

        retrieve_url = f"{self.base_api}/generations/{generation_id}"

        # GET request to retrieve the generated image URLs.
        response_retrieve = requests.get(retrieve_url, headers=get_headers)
        retrieve_data = response_retrieve.json()

        generated_images = retrieve_data["generations_by_pk"]["generated_images"]
        image_urls = [image["url"] for image in generated_images]

        return pd.DataFrame({'url': image_urls}, columns=['url'])

    def describe(self, attribute: Optional[str] = None) -> pd.DataFrame:
        """
        Describe the model: its features, metadata, or available describe tables.
        """
        args = self.model_storage.json_get('args')
        model, target = args['using']['model'], args['target']
        prompt_template = args.get('prompt_template', 'Generate a picture of {{{{text}}}}')

        if attribute == "features":
            return pd.DataFrame([[target, prompt_template]], columns=['target_column', 'mindsdb_prompt_template'])
        elif attribute == "metadata":
            # NOTE(review): this exposes the raw API key to anyone who can run
            # DESCRIBE on the model -- consider redacting it.
            api_key = self._get_leonardo_api_key(args, self.engine_storage)
            return pd.DataFrame([[target, api_key, model]], columns=['target', 'api_key', 'model_name'])
        else:
            tables = ['args', 'api_key']
            return pd.DataFrame(tables, columns=['tables'])