Coverage for mindsdb / integrations / handlers / anthropic_handler / anthropic_handler.py: 0%

47 statements  

« prev     ^ index     » next       coverage.py v7.13.1, created at 2026-01-21 00:36 +0000

1from typing import Dict, Optional 

2 

3import pandas as pd 

4from anthropic import Anthropic 

5 

6from mindsdb.integrations.libs.base import BaseMLEngine 

7from mindsdb.utilities import log 

8 

9from mindsdb.integrations.utilities.handler_utils import get_api_key 

10 

11logger = log.getLogger(__name__) 

12 

13 

class AnthropicHandler(BaseMLEngine):
    """
    Integration with the Anthropic LLM Python Library.

    Stores the USING-clause arguments at CREATE time and, at PREDICT time,
    sends each value of the configured input column to the Anthropic
    Messages API, returning the generated text in the target column.
    """

    name = "anthropic"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fallback model when the USING clause omits `model`.
        self.default_chat_model = "claude-2.1"
        # Models this handler accepts in the USING clause.
        self.supported_chat_models = ["claude-instant-1.2", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229"]
        # Fallback completion budget when the USING clause omits `max_tokens`.
        self.default_max_tokens = 100
        self.generative = True
        # Anthropic client; created lazily in predict() once the API key is known.
        self.connection = None

    def create(
        self,
        target: str,
        df: Optional[pd.DataFrame] = None,
        args: Optional[Dict] = None,
    ) -> None:
        """
        Validate the USING-clause arguments and persist them for predict().

        Args:
            target: Name of the output column for predictions.
            df: Optional training data (unused; the engine is generative).
            args: Statement arguments; must contain a "using" dict with at
                least `column`, and optionally `model` and `max_tokens`.

        Raises:
            Exception: If the USING clause is missing or names an
                unsupported chat model.
        """
        # `args` is Optional: guard against None before the membership test,
        # otherwise `"using" not in None` raises TypeError instead of the
        # intended guidance message.
        if args is None or "using" not in args:
            raise Exception(
                "Anthropic engine requires a USING clause! Refer to its documentation for more details."
            )

        if "model" not in args["using"]:
            args["using"]["model"] = self.default_chat_model
        elif args["using"]["model"] not in self.supported_chat_models:
            raise Exception(
                f"Invalid chat model. Please use one of {self.supported_chat_models}"
            )

        if "max_tokens" not in args["using"]:
            args["using"]["max_tokens"] = self.default_max_tokens

        self.model_storage.json_set("args", args)

    def predict(
        self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None
    ) -> pd.DataFrame:
        """
        Generate a prediction for every row of the input column.

        Args:
            df: Input data; must contain the column named by the stored
                `column` option.
            args: Ignored; the arguments persisted by create() are used.

        Returns:
            A DataFrame with a single column named after the target,
            containing one model response per input row.

        Raises:
            Exception: If the stored arguments lack a `column` option.
            RuntimeError: If the configured column is absent from `df`.
        """
        # Use the arguments validated and stored by create(), not the
        # per-call `args`.
        args = self.model_storage.json_get("args")
        api_key = get_api_key('anthropic', args["using"], self.engine_storage, strict=False)

        self.connection = Anthropic(
            api_key=api_key,
        )

        # Fail with a clear message rather than a bare KeyError when the
        # required `column` option was never provided.
        if "column" not in args["using"]:
            raise Exception(
                "Anthropic engine requires a `column` option in the USING clause! Refer to its documentation for more details."
            )
        input_column = args["using"]["column"]

        if input_column not in df.columns:
            raise RuntimeError(f'Column "{input_column}" not found in input data')

        result_df = pd.DataFrame()

        result_df["predictions"] = df[input_column].apply(self.predict_answer)

        result_df = result_df.rename(columns={"predictions": args["target"]})

        return result_df

    def predict_answer(self, text):
        """
        Connect to the Anthropic Messages API to generate an answer for `text`.

        Args:
            text: The user prompt to send as a single user message.

        Returns:
            The generated text when the first content block is of type
            "text"; otherwise the raw content-block list.

        Raises:
            Exception: If the API response contains no content blocks.
        """
        args = self.model_storage.json_get("args")

        message = self.connection.messages.create(
            model=args["using"]["model"],
            max_tokens=args["using"]["max_tokens"],
            messages=[
                {"role": "user", "content": text}
            ]
        )

        content_blocks = message.content

        # assuming that message.content contains one ContentBlock item
        # returning text value if type==text and content_blocks value if type!=text
        if isinstance(content_blocks, list) and len(content_blocks) > 0:
            content_block = content_blocks[0]
            if content_block.type == 'text':
                return content_block.text
            else:
                return content_blocks
        else:
            raise Exception(
                f"Invalid output: {content_blocks}"
            )