Claude support with langchain anthropic
parent 6b4ebe1bd0
commit 777d1fa4a6
@@ -42,5 +42,5 @@ titleBlacklist:
   - word2
 llm_model_type: [openai / ollama / claude]
-llm_model: [gpt-4o / mistral:v0.3 / anymodel]
-llm_api_url: [https://api.pawan.krd/cosmosrp/v1' / http://127.0.0.1:11434/]
+llm_model: [gpt-4o / mistral:v0.3 / claude-3-5-sonnet-20240620]
+llm_api_url: [https://api.pawan.krd/cosmosrp/v1', http://127.0.0.1:11434/, https://api.anthropic.com/v1/messages]
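For orientation (not part of the diff), here is a sketch of the kind of configuration the new claude option implies, written as the Python dict that the AIAdapter in src/gpt.py below presumably receives; the key names mirror the config lines above, while the concrete values are illustrative assumptions:

# Hypothetical example values; key names follow the config hunk above.
claude_config = {
    "llm_model_type": "claude",                               # selects the Claude backend
    "llm_model": "claude-3-5-sonnet-20240620",                # passed through to ChatAnthropic
    "llm_api_url": "https://api.anthropic.com/v1/messages",   # optional custom endpoint
}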
requirements.txt (binary file, not shown)
src/gpt.py (31 changed lines)
@@ -20,7 +20,7 @@ load_dotenv()

 class AIModel(ABC):
     @abstractmethod
-    def generate_response(self, prompt: str) -> str:
+    def invoke(self, prompt: str) -> str:
         pass

 class OpenAIModel(AIModel):
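As a small illustration (not part of the diff), the renamed abstract method means every backend wrapper now exposes the same invoke(prompt) entry point; a minimal sketch with a hypothetical dummy backend:

from abc import ABC, abstractmethod

class AIModel(ABC):
    @abstractmethod
    def invoke(self, prompt: str) -> str:
        pass

class EchoModel(AIModel):
    # Dummy backend, used only to show the interface contract.
    def invoke(self, prompt: str) -> str:
        return f"echo: {prompt}"

print(EchoModel().invoke("hello"))  # -> echo: hello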
@@ -29,32 +29,29 @@ class OpenAIModel(AIModel):
         self.model = ChatOpenAI(model_name=llm_model, openai_api_key=api_key,
                                 temperature=0.4, base_url=llm_api_url)

-    def generate_response(self, prompt: str) -> str:
+    def invoke(self, prompt: str) -> str:
+        print("invoke in openai")
         response = self.model.invoke(prompt)
-        return response.content
+        return response

 class ClaudeModel(AIModel):
     def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
-        from anthropic import Anthropic
-        self.client = Anthropic(api_key=api_key)
+        from langchain_anthropic import ChatAnthropic
+        self.model = ChatAnthropic(model=llm_model, api_key=api_key,
+                                   temperature=0.4, base_url=llm_api_url)

-    def generate_response(self, prompt: str) -> str:
-        formatted_prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
-        response = self.client.completions.create(
-            model="claude-2",
-            prompt=formatted_prompt,
-            max_tokens_to_sample=300
-        )
-        return response.completion.strip()
+    def invoke(self, prompt: str) -> str:
+        response = self.model.invoke(prompt)
+        return response

 class OllamaModel(AIModel):
     def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
         from langchain_ollama import ChatOllama
         self.model = ChatOllama(model=llm_model, base_url=llm_api_url)

-    def generate_response(self, prompt: str) -> str:
+    def invoke(self, prompt: str) -> str:
         response = self.model.invoke(prompt)
-        return response.content
+        return response

 class AIAdapter:
     def __init__(self, config: dict, api_key: str):
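A minimal standalone sketch (not part of the diff) of the langchain_anthropic path the new ClaudeModel wraps; the model name and the environment-variable handling here are assumptions. One detail worth keeping in mind: LangChain chat models return a message object (an AIMessage) with the generated text in its .content attribute, which is why the wrappers above now hand back that object rather than a plain string:

import os
from langchain_anthropic import ChatAnthropic

# Assumes ANTHROPIC_API_KEY is set in the environment.
model = ChatAnthropic(
    model="claude-3-5-sonnet-20240620",
    api_key=os.environ["ANTHROPIC_API_KEY"],
    temperature=0.4,
)
response = model.invoke("Reply with a single word.")
print(type(response).__name__)  # AIMessage
print(response.content)         # the generated text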
@@ -75,8 +72,8 @@ class AIAdapter:
         else:
             raise ValueError(f"Unsupported model type: {model_type}")

-    def generate_response(self, prompt: str) -> str:
-        return self.model.generate_response(prompt)
+    def invoke(self, prompt: str) -> str:
+        return self.model.invoke(prompt)

 class LLMLogger:

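Finally, a hedged end-to-end sketch (not part of the diff) of how the adapter could be driven after this change; it assumes the elided branching in AIAdapter.__init__ maps "claude" to ClaudeModel and reads the config keys shown earlier, and that the module is importable as src.gpt:

import os
from src.gpt import AIAdapter  # import path per the diff; packaging details are an assumption

config = {
    "llm_model_type": "claude",
    "llm_model": "claude-3-5-sonnet-20240620",
    "llm_api_url": "https://api.anthropic.com/v1/messages",
}

adapter = AIAdapter(config, api_key=os.environ["ANTHROPIC_API_KEY"])
message = adapter.invoke("Summarize this job posting in one sentence: ...")
print(message.content)  # ChatAnthropic returns an AIMessage; its text is in .content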