Adapter class added
parent ca4f56833a
commit a9d9e13474
@@ -211,11 +211,12 @@ This file defines your job search parameters and bot behavior. Each section cont
     - Marketing
 ```
 - `llm_model_type`:
-  - Choose the model type, supported: openai / ollama
+  - Choose the model type, supported: openai / ollama / claude
 - `llm_model`:
   - Choose the LLM model, currently supported:
     - openai: gpt-4o
     - ollama: llama2, mistral:v0.3
+    - claude: any model
 - `llm_api_url`:
   - Link of the API endpoint for the LLM model
 
@@ -41,6 +41,6 @@ titleBlacklist:
 - word1
 - word2
 
-llm_model_type: [openai / ollama]
-llm_model: ['gpt-4o' / 'mistral:v0.3']
-llm_api_url: [https://api.pawan.krd/cosmosrp/v1', http://127.0.0.1:11434/]
+llm_model_type: [openai / ollama / claude]
+llm_model: ['gpt-4o' / 'mistral:v0.3' / anymodel]
+llm_api_url: [https://api.pawan.krd/cosmosrp/v1' / http://127.0.0.1:11434/]
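For context, these keys are exactly what `AIAdapter._create_model` (added below) reads on the Python side. A minimal sketch of a Claude configuration as the dict the adapter consumes; the values are illustrative, not taken from this commit:

```
# Hypothetical config mirroring the YAML keys above. The model name is
# illustrative; ClaudeModel in this commit hard-codes "claude-2" regardless.
config = {
    "llm_model_type": "claude",
    "llm_model": "claude-2",
    "llm_api_url": "",  # ignored by the Claude path in this commit
}
```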
src/gpt.py (79 changed lines)
@@ -3,6 +3,7 @@ import os
 import re
 import textwrap
 from datetime import datetime
+from abc import ABC, abstractmethod
 from typing import Dict, List, Union
 from pathlib import Path
 from dotenv import load_dotenv
@@ -11,17 +12,75 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompt_values import StringPromptValue
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
-from langchain_ollama import ChatOllama
 from Levenshtein import distance
 
 import src.strings as strings
 
 load_dotenv()
 
+
+class AIModel(ABC):
+    @abstractmethod
+    def generate_response(self, prompt: str) -> str:
+        pass
+
+
+class OpenAIModel(AIModel):
+    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
+        from langchain_openai import ChatOpenAI
+        self.model = ChatOpenAI(model_name=llm_model, openai_api_key=api_key,
+                                temperature=0.4, base_url=llm_api_url)
+
+    def generate_response(self, prompt: str) -> str:
+        response = self.model.invoke(prompt)
+        return response.content
+
+
+class ClaudeModel(AIModel):
+    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
+        from anthropic import Anthropic
+        self.client = Anthropic(api_key=api_key)
+
+    def generate_response(self, prompt: str) -> str:
+        formatted_prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
+        response = self.client.completions.create(
+            model="claude-2",
+            prompt=formatted_prompt,
+            max_tokens_to_sample=300
+        )
+        return response.completion.strip()
+
+
+class OllamaModel(AIModel):
+    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
+        from langchain_ollama import ChatOllama
+        self.model = ChatOllama(model=llm_model, base_url=llm_api_url)
+
+    def generate_response(self, prompt: str) -> str:
+        response = self.model.invoke(prompt)
+        return response.content
+
+
+class AIAdapter:
+    def __init__(self, config: dict, api_key: str):
+        self.model = self._create_model(config, api_key)
+
+    def _create_model(self, config: dict, api_key: str) -> AIModel:
+        llm_model_type = config['llm_model_type']
+        llm_model = config['llm_model']
+        llm_api_url = config['llm_api_url']
+        print('Using {0} with {1} from {2}'.format(llm_model_type, llm_model, llm_api_url))
+
+        if llm_model_type == "openai":
+            return OpenAIModel(api_key, llm_model, llm_api_url)
+        elif llm_model_type == "claude":
+            return ClaudeModel(api_key, llm_model, llm_api_url)
+        elif llm_model_type == "ollama":
+            return OllamaModel(api_key, llm_model, llm_api_url)
+        else:
+            raise ValueError(f"Unsupported model type: {llm_model_type}")
+
+    def generate_response(self, prompt: str) -> str:
+        return self.model.generate_response(prompt)
+
+
 class LLMLogger:
 
-    def __init__(self, llm: Union[ChatOpenAI, ChatOllama]):
+    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel]):
         self.llm = llm
 
     @staticmethod
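The adapter isolates each provider behind a single `generate_response` interface, so supporting a new backend is one `AIModel` subclass plus one branch in `_create_model`. As the branch list grows, a table-driven factory is a common alternative; a sketch assuming every subclass keeps the same constructor signature (not part of this commit):

```
# Sketch: dict dispatch equivalent to the if/elif chain in _create_model.
MODEL_CLASSES = {
    "openai": OpenAIModel,
    "claude": ClaudeModel,
    "ollama": OllamaModel,
}

def create_model(config: dict, api_key: str) -> AIModel:
    try:
        cls = MODEL_CLASSES[config["llm_model_type"]]
    except KeyError:
        raise ValueError(f"Unsupported model type: {config['llm_model_type']}")
    return cls(api_key, config["llm_model"], config["llm_api_url"])
```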
@@ -79,7 +138,7 @@ class LLMLogger:
 
 class LoggerChatModel:
 
-    def __init__(self, llm: Union[ChatOpenAI, ChatOllama]):
+    def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel]):
         self.llm = llm
 
     def __call__(self, messages: List[Dict[str, str]]) -> str:
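Since all three concrete classes derive from `AIModel`, the widened `Union` hints could instead name the base class, which stays correct as providers are added; a sketch of that alternative (not what the commit does):

```
# Sketch: the ABC already names the shared contract.
class LoggerChatModel:
    def __init__(self, llm: AIModel):
        self.llm = llm
```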
@@ -115,18 +174,8 @@ class LoggerChatModel:
 
 class GPTAnswerer:
     def __init__(self, config, llm_api_key):
-        llm_model_type = config['llm_model_type']
-        llm_model = config['llm_model']
-        llm_api_url = config['llm_api_url']
-
-        print('Using {0} with {1} from {2}'.format(llm_model_type, llm_model, llm_api_url))
-
-        if llm_model_type == "ollama":
-            self.llm_model = ChatOllama(model=llm_model, temperature = 0.4, base_url=llm_api_url)
-        elif llm_model_type == "openai":
-            self.llm_model = ChatOpenAI(model_name=llm_model, openai_api_key=llm_api_key, temperature=0.4,
-                                        base_url=llm_api_url)
-        self.llm_cheap = LoggerChatModel(self.llm_model)
+        self.ai_adapter = AIAdapter(config, llm_api_key)
+        self.llm_cheap = LoggerChatModel(self.ai_adapter)
 
     @property
     def job_description(self):
         return self.job.description
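End to end, provider selection now happens once at construction; a minimal usage sketch (values are illustrative, not from this commit):

```
# Minimal usage sketch of the new wiring; OllamaModel ignores the API key.
config = {
    "llm_model_type": "ollama",
    "llm_model": "mistral:v0.3",
    "llm_api_url": "http://127.0.0.1:11434/",
}
adapter = AIAdapter(config, api_key="")
print(adapter.generate_response("Reply with the single word: ready"))
```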