Added support for ollama endpoint and defining the LLM model in the config

This commit is contained in:
user 2024-08-31 22:58:40 +02:00
parent d964f599be
commit ca4f56833a
7 changed files with 38 additions and 33 deletions

View File

@ -148,17 +148,10 @@ This file contains sensitive information. Never share or commit this file to ver
- Replace with your LinkedIn account email address
- `password: [Your LinkedIn password]`
- Replace with your LinkedIn account password
- `openai_api_key: [Your OpenAI API key]`
- `llm_api_key: [Your OpenAI or Ollama API key]`
- Replace with your OpenAI or Ollama API key for LLM integration
- To obtain an API key, follow the tutorial at: https://medium.com/@lorenzozar/how-to-get-your-own-openai-api-key-f4d44e60c327
- Note: You need to add credit to your OpenAI account to use the API. You can add credit by visiting the [OpenAI billing dashboard](https://platform.openai.com/account/billing).
- `openai_api_free_hosted_url`:
- Optional parameter, if you want to use a freely hosted GPT model, set `openai_api_key: "freehosted"` and `openai_api_free_hosted_url` with the URL of the endpoint
- Ollama local support
- If you want to use Ollama which is deployed locally, leave `openai_api_key` blank.
- To setup Ollama to run locally follow the instructions here: [Ollama installation](https://github.com/ollama/ollama).
- Download mistral model by pulling mistral:v0.3
### 2. config.yaml
@ -217,6 +210,14 @@ This file defines your job search parameters and bot behavior. Each section cont
- Sales
- Marketing
```
- `llm_model_type`:
- Choose the model type, supported: openai / ollama
- `llm_model`:
- Choose the LLM model, currently supported:
- openai: gpt-4o
- ollama: llama2, mistral:v0.3
- `llm_api_url`:
- Link of the API endpoint for the LLM model
### 3. plain_text_resume.yaml

View File

@ -39,4 +39,8 @@ companyBlacklist:
titleBlacklist:
- word1
- word2
- word2
llm_model_type: [openai / ollama]
llm_model: ['gpt-4o' / 'mistral:v0.3']
llm_api_url: ['https://api.pawan.krd/cosmosrp/v1' / 'http://127.0.0.1:11434/']

View File

@ -1,4 +1,3 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_free_hosted_url: https://api.pawan.krd/cosmosrp/v1
llm_api_key: 'sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR'

View File

@ -37,3 +37,7 @@ companyBlacklist:
- Crossover
titleBlacklist:
llm_model_type: openai
llm_model: 'gpt-4o'
llm_api_url: 'https://api.pawan.krd/cosmosrp/v1'

View File

@ -1,4 +1,3 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_free_hosted_url: https://api.pawan.krd/cosmosrp/v1
llm_api_key: 'sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR'

15
main.py
View File

@ -111,10 +111,7 @@ class ConfigValidator:
raise ConfigError(f"Invalid email format in secrets file {secrets_yaml_path}.")
if not secrets['password']:
raise ConfigError(f"Password cannot be empty in secrets file {secrets_yaml_path}.")
if not secrets['openai_api_key']:
raise ConfigError(f"OpenAI API key cannot be empty in secrets file {secrets_yaml_path}.")
return secrets['email'], str(secrets['password']), secrets['openai_api_key'], secrets['openai_api_free_hosted_url']
return secrets['email'], str(secrets['password']), secrets['llm_api_key']
class FileManager:
@staticmethod
@ -158,14 +155,14 @@ def init_browser() -> webdriver.Chrome:
except Exception as e:
raise RuntimeError(f"Failed to initialize browser: {str(e)}")
def create_and_run_bot(email, password, parameters, openai_api_key, openai_api_free_hosted_url):
def create_and_run_bot(email, password, parameters, llm_api_key):
try:
style_manager = StyleManager()
resume_generator = ResumeGenerator()
with open(parameters['uploads']['plainTextResume'], "r") as file:
plain_text_resume = file.read()
resume_object = Resume(plain_text_resume)
resume_generator_manager = FacadeManager(openai_api_key, style_manager, resume_generator, resume_object, Path("data_folder/output"))
resume_generator_manager = FacadeManager(llm_api_key, style_manager, resume_generator, resume_object, Path("data_folder/output"))
os.system('cls' if os.name == 'nt' else 'clear')
resume_generator_manager.choose_style()
os.system('cls' if os.name == 'nt' else 'clear')
@ -175,7 +172,7 @@ def create_and_run_bot(email, password, parameters, openai_api_key, openai_api_f
browser = init_browser()
login_component = LinkedInAuthenticator(browser)
apply_component = LinkedInJobManager(browser)
gpt_answerer_component = GPTAnswerer(openai_api_key, openai_api_free_hosted_url)
gpt_answerer_component = GPTAnswerer(parameters, llm_api_key)
bot = LinkedInBotFacade(login_component, apply_component)
bot.set_secrets(email, password)
bot.set_job_application_profile_and_resume(job_application_profile_object, resume_object)
@ -197,12 +194,12 @@ def main(resume: Path = None):
secrets_file, config_file, plain_text_resume_file, output_folder = FileManager.validate_data_folder(data_folder)
parameters = ConfigValidator.validate_config(config_file)
email, password, openai_api_key, openai_api_free_hosted_url = ConfigValidator.validate_secrets(secrets_file)
email, password, llm_api_key = ConfigValidator.validate_secrets(secrets_file)
parameters['uploads'] = FileManager.file_paths_to_dict(resume, plain_text_resume_file)
parameters['outputFileDirectory'] = output_folder
create_and_run_bot(email, password, parameters, openai_api_key, openai_api_free_hosted_url)
create_and_run_bot(email, password, parameters, llm_api_key)
except ConfigError as ce:
print(f"Configuration error: {str(ce)}")
print("Refer to the configuration guide for troubleshooting: https://github.com/feder-cr/LinkedIn_AIHawk_automatic_job_application/blob/main/readme.md#configuration")

View File

@ -114,17 +114,18 @@ class LoggerChatModel:
class GPTAnswerer:
def __init__(self, openai_api_key, openai_api_free_hosted_url):
if openai_api_key == "":
print('Using locally hosted mistral:v0.3')
self.llm_model = ChatOllama(model = "mistral:v0.3", temperature = 0.4, num_predict = 256)
elif openai_api_key == "freehosted":
print('Using free hosted gpt-4o-mini')
self.llm_model = ChatOpenAI(model_name="gpt-4o-mini", openai_api_key="anything", temperature=0.4,
base_url=openai_api_free_hosted_url)
else:
print("Using gpt-4o-mini")
self.llm_model = ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4)
def __init__(self, config, llm_api_key):
    """Construct the chat-model backend selected by the configuration.

    Args:
        config: mapping with keys 'llm_model_type' ('openai' or 'ollama'),
            'llm_model' (model name passed to the backend), and
            'llm_api_url' (endpoint URL for the model server).
        llm_api_key: API key used by the OpenAI backend; not used by Ollama.

    Raises:
        ValueError: if 'llm_model_type' is not a supported backend, instead
            of silently leaving self.llm_model unset.
    """
    llm_model_type = config['llm_model_type']
    llm_model = config['llm_model']
    llm_api_url = config['llm_api_url']
    print('Using {0} with {1} from {2}'.format(llm_model_type, llm_model, llm_api_url))
    if llm_model_type == "ollama":
        # Ollama runs locally and needs no API key, only model + endpoint.
        self.llm_model = ChatOllama(model=llm_model, temperature=0.4, base_url=llm_api_url)
    elif llm_model_type == "openai":
        self.llm_model = ChatOpenAI(model_name=llm_model, openai_api_key=llm_api_key,
                                    temperature=0.4, base_url=llm_api_url)
    else:
        # Fail fast: the original fell through and later crashed with an
        # unrelated AttributeError when self.llm_model was never assigned.
        raise ValueError(
            "Unsupported llm_model_type: {0!r} (expected 'openai' or 'ollama')".format(llm_model_type))
    self.llm_cheap = LoggerChatModel(self.llm_model)
@property
def job_description(self):