Merge pull request #199 from zaverichintan/v3

Added support for Ollama running locally or publicly hosted api
This commit is contained in:
Federico 2024-09-02 14:27:48 +02:00 committed by GitHub
commit 15bbd61177
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 98 additions and 23 deletions

View File

@ -148,13 +148,12 @@ This file contains sensitive information. Never share or commit this file to ver
- Replace with your LinkedIn account email address
- `password: [Your LinkedIn password]`
- Replace with your LinkedIn account password
- `openai_api_key: [Your OpenAI API key]`
- `llm_api_key: [Your OpenAI or Ollama API key]`
- Replace with your API key for the chosen LLM provider (OpenAI, Ollama, or Claude)
- To obtain an API key, follow the tutorial at: https://medium.com/@lorenzozar/how-to-get-your-own-openai-api-key-f4d44e60c327
- Note: You need to add credit to your OpenAI account to use the API. You can add credit by visiting the [OpenAI billing dashboard](https://platform.openai.com/account/billing).
### 2. config.yaml
This file defines your job search parameters and bot behavior. Each section contains options that you can customize:
@ -211,7 +210,22 @@ This file defines your job search parameters and bot behavior. Each section cont
- Sales
- Marketing
```
#### 2.1 config.yaml - Customize LLM model endpoint
- `llm_model_type`:
- Choose the model type, supported: openai / ollama / claude
- `llm_model`:
- Choose the LLM model, currently supported:
- openai: gpt-4o
- ollama: llama2, mistral:v0.3
- claude: any model
- `llm_api_url`:
- Link of the API endpoint for the LLM model
- openai: https://api.pawan.krd/cosmosrp/v1
- ollama: http://127.0.0.1:11434/
- claude: https://api.anthropic.com/v1
- Note: To run local Ollama, follow the guidelines here: [Guide to Ollama deployment](https://github.com/ollama/ollama)
### 3. plain_text_resume.yaml
This file contains your resume information in a structured format. Fill it out with your personal details, education, work experience, and skills. This information is used to auto-fill application forms and generate customized resumes.

View File

@ -39,4 +39,8 @@ companyBlacklist:
titleBlacklist:
- word1
- word2
- word2
llm_model_type: openai
llm_model: gpt-4o
llm_api_url: https://api.pawan.krd/cosmosrp/v1

View File

@ -1,3 +1,3 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
llm_api_key: 'sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR'

View File

@ -37,3 +37,7 @@ companyBlacklist:
- Crossover
titleBlacklist:
llm_model_type: openai
llm_model: 'gpt-4o'
llm_api_url: 'https://api.pawan.krd/cosmosrp/v1'

View File

@ -1,3 +1,3 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
llm_api_key: 'sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR'

17
main.py
View File

@ -101,7 +101,7 @@ class ConfigValidator:
@staticmethod
def validate_secrets(secrets_yaml_path: Path) -> tuple:
secrets = ConfigValidator.validate_yaml_file(secrets_yaml_path)
mandatory_secrets = ['email', 'password', 'openai_api_key']
mandatory_secrets = ['email', 'password']
for secret in mandatory_secrets:
if secret not in secrets:
@ -111,10 +111,7 @@ class ConfigValidator:
raise ConfigError(f"Invalid email format in secrets file {secrets_yaml_path}.")
if not secrets['password']:
raise ConfigError(f"Password cannot be empty in secrets file {secrets_yaml_path}.")
if not secrets['openai_api_key']:
raise ConfigError(f"OpenAI API key cannot be empty in secrets file {secrets_yaml_path}.")
return secrets['email'], str(secrets['password']), secrets['openai_api_key']
return secrets['email'], str(secrets['password']), secrets['llm_api_key']
class FileManager:
@staticmethod
@ -158,14 +155,14 @@ def init_browser() -> webdriver.Chrome:
except Exception as e:
raise RuntimeError(f"Failed to initialize browser: {str(e)}")
def create_and_run_bot(email: str, password: str, parameters: dict, openai_api_key: str):
def create_and_run_bot(email, password, parameters, llm_api_key):
try:
style_manager = StyleManager()
resume_generator = ResumeGenerator()
with open(parameters['uploads']['plainTextResume'], "r") as file:
plain_text_resume = file.read()
resume_object = Resume(plain_text_resume)
resume_generator_manager = FacadeManager(openai_api_key, style_manager, resume_generator, resume_object, Path("data_folder/output"))
resume_generator_manager = FacadeManager(llm_api_key, style_manager, resume_generator, resume_object, Path("data_folder/output"))
os.system('cls' if os.name == 'nt' else 'clear')
resume_generator_manager.choose_style()
os.system('cls' if os.name == 'nt' else 'clear')
@ -175,7 +172,7 @@ def create_and_run_bot(email: str, password: str, parameters: dict, openai_api_k
browser = init_browser()
login_component = LinkedInAuthenticator(browser)
apply_component = LinkedInJobManager(browser)
gpt_answerer_component = GPTAnswerer(openai_api_key)
gpt_answerer_component = GPTAnswerer(parameters, llm_api_key)
bot = LinkedInBotFacade(login_component, apply_component)
bot.set_secrets(email, password)
bot.set_job_application_profile_and_resume(job_application_profile_object, resume_object)
@ -197,12 +194,12 @@ def main(resume: Path = None):
secrets_file, config_file, plain_text_resume_file, output_folder = FileManager.validate_data_folder(data_folder)
parameters = ConfigValidator.validate_config(config_file)
email, password, openai_api_key = ConfigValidator.validate_secrets(secrets_file)
email, password, llm_api_key = ConfigValidator.validate_secrets(secrets_file)
parameters['uploads'] = FileManager.file_paths_to_dict(resume, plain_text_resume_file)
parameters['outputFileDirectory'] = output_folder
create_and_run_bot(email, password, parameters, openai_api_key)
create_and_run_bot(email, password, parameters, llm_api_key)
except ConfigError as ce:
print(f"Configuration error: {str(ce)}")
print("Refer to the configuration guide for troubleshooting: https://github.com/feder-cr/LinkedIn_AIHawk_automatic_job_application/blob/main/readme.md#configuration")

Binary file not shown.

View File

@ -3,7 +3,8 @@ import os
import re
import textwrap
from datetime import datetime
from typing import Dict, List
from abc import ABC, abstractmethod
from typing import Dict, List, Union
from pathlib import Path
from dotenv import load_dotenv
from langchain_core.messages.ai import AIMessage
@ -17,10 +18,66 @@ import src.strings as strings
load_dotenv()
class AIModel(ABC):
    """Abstract interface for chat-model backends (OpenAI, Ollama, Claude)."""

    @abstractmethod
    def invoke(self, prompt: str) -> str:
        # NOTE(review): concrete implementations return the provider's message
        # object (e.g. an AIMessage), not a plain str — the `-> str` annotation
        # looks inaccurate; confirm with callers before tightening it.
        """Send *prompt* to the underlying model and return its reply."""
        pass
class OpenAIModel(AIModel):
    """AIModel backend for an OpenAI-compatible chat-completions endpoint."""

    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
        """Create the client.

        api_key:     OpenAI (or compatible proxy) API key.
        llm_model:   model name, e.g. "gpt-4o".
        llm_api_url: base URL of the endpoint.
        """
        # Imported lazily so the langchain_openai dependency is only needed
        # when this backend is actually selected in config.
        from langchain_openai import ChatOpenAI
        self.model = ChatOpenAI(model_name=llm_model, openai_api_key=api_key,
                                temperature=0.4, base_url=llm_api_url)

    def invoke(self, prompt: str) -> str:
        # Fix: removed leftover debug `print("invoke in openai")` that spammed
        # stdout on every model call. Returns the provider's response object.
        response = self.model.invoke(prompt)
        return response
class ClaudeModel(AIModel):
    """AIModel backend that talks to Anthropic's Claude API."""

    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
        """Build a ChatAnthropic client for *llm_model* at *llm_api_url*."""
        # Lazy import keeps the Anthropic dependency optional.
        from langchain_anthropic import ChatAnthropic
        self.model = ChatAnthropic(
            model=llm_model,
            api_key=api_key,
            temperature=0.4,
            base_url=llm_api_url,
        )

    def invoke(self, prompt: str) -> str:
        """Forward *prompt* to Claude and return the provider's response."""
        return self.model.invoke(prompt)
class OllamaModel(AIModel):
    """AIModel backend for a locally running or hosted Ollama server.

    The *api_key* argument is accepted for interface parity with the other
    backends but is not used by Ollama.
    """

    def __init__(self, api_key: str, llm_model: str, llm_api_url: str):
        from langchain_ollama import ChatOllama  # optional dependency
        self.model = ChatOllama(model=llm_model, base_url=llm_api_url)

    def invoke(self, prompt: str) -> str:
        """Forward *prompt* to the Ollama server and return its response."""
        return self.model.invoke(prompt)
class AIAdapter:
    """Facade that instantiates the configured AIModel backend and delegates
    prompt invocation to it.

    Expects *config* to contain 'llm_model_type' ("openai" | "claude" |
    "ollama"), 'llm_model', and 'llm_api_url'.
    """

    def __init__(self, config: dict, api_key: str):
        self.model = self._create_model(config, api_key)

    def _create_model(self, config: dict, api_key: str) -> AIModel:
        """Build the backend named by config['llm_model_type'].

        Raises ValueError for an unsupported model type.
        """
        llm_model_type = config['llm_model_type']
        llm_model = config['llm_model']
        llm_api_url = config['llm_api_url']
        print(f'Using {llm_model_type} with {llm_model} from {llm_api_url}')
        if llm_model_type == "openai":
            return OpenAIModel(api_key, llm_model, llm_api_url)
        elif llm_model_type == "claude":
            return ClaudeModel(api_key, llm_model, llm_api_url)
        elif llm_model_type == "ollama":
            return OllamaModel(api_key, llm_model, llm_api_url)
        else:
            # Bug fix: the original referenced undefined `model_type` here,
            # so a bad config raised NameError instead of this ValueError.
            raise ValueError(f"Unsupported model type: {llm_model_type}")

    def invoke(self, prompt: str) -> str:
        return self.model.invoke(prompt)
class LLMLogger:
def __init__(self, llm: ChatOpenAI):
def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel]):
self.llm = llm
@staticmethod
@ -78,12 +135,12 @@ class LLMLogger:
class LoggerChatModel:
def __init__(self, llm: ChatOpenAI):
def __init__(self, llm: Union[OpenAIModel, OllamaModel, ClaudeModel]):
self.llm = llm
def __call__(self, messages: List[Dict[str, str]]) -> str:
# Call the LLM with the provided messages and log the response.
reply = self.llm(messages)
reply = self.llm.invoke(messages)
parsed_reply = self.parse_llmresult(reply)
LLMLogger.log_request(prompts=messages, parsed_reply=parsed_reply)
return reply
@ -113,10 +170,9 @@ class LoggerChatModel:
class GPTAnswerer:
def __init__(self, openai_api_key):
self.llm_cheap = LoggerChatModel(
ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4)
)
def __init__(self, config, llm_api_key):
self.ai_adapter = AIAdapter(config, llm_api_key)
self.llm_cheap = LoggerChatModel(self.ai_adapter)
@property
def job_description(self):
    """Description text of the job currently assigned to this answerer."""
    current_job = self.job
    return current_job.description