Added support for Ollama running locally or publicly hosted api

This commit is contained in:
user 2024-08-31 20:19:33 +02:00
parent 6f3b3252ba
commit d964f599be
5 changed files with 34 additions and 17 deletions

View File

@ -152,7 +152,13 @@ This file contains sensitive information. Never share or commit this file to ver
- Replace with your OpenAI API key for GPT integration
- To obtain an API key, follow the tutorial at: https://medium.com/@lorenzozar/how-to-get-your-own-openai-api-key-f4d44e60c327
- Note: You need to add credit to your OpenAI account to use the API. You can add credit by visiting the [OpenAI billing dashboard](https://platform.openai.com/account/billing).
- `openai_api_free_hosted_url`:
- Optional parameter: if you want to use the freely hosted GPT model, set `openai_api_key: "freehosted"` and set `openai_api_free_hosted_url` to the URL of the endpoint
- Ollama local support
- If you want to use a locally deployed Ollama instance, leave `openai_api_key` blank.
- To setup Ollama to run locally follow the instructions here: [Ollama installation](https://github.com/ollama/ollama).
- Download the Mistral model by running `ollama pull mistral:v0.3`
### 2. config.yaml

View File

@ -1,3 +1,4 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_free_hosted_url: https://api.pawan.krd/cosmosrp/v1

View File

@ -1,3 +1,4 @@
email: myemaillinkedin@gmail.com
password: ImpossiblePassowrd10
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_key: sk-11KRr4uuTwpRGfeRTfj1T9BlbkFJjP8QTrswHU1yGruru2FR
openai_api_free_hosted_url: https://api.pawan.krd/cosmosrp/v1

12
main.py
View File

@ -101,7 +101,7 @@ class ConfigValidator:
@staticmethod
def validate_secrets(secrets_yaml_path: Path) -> tuple:
secrets = ConfigValidator.validate_yaml_file(secrets_yaml_path)
mandatory_secrets = ['email', 'password', 'openai_api_key']
mandatory_secrets = ['email', 'password']
for secret in mandatory_secrets:
if secret not in secrets:
@ -114,7 +114,7 @@ class ConfigValidator:
if not secrets['openai_api_key']:
raise ConfigError(f"OpenAI API key cannot be empty in secrets file {secrets_yaml_path}.")
return secrets['email'], str(secrets['password']), secrets['openai_api_key']
return secrets['email'], str(secrets['password']), secrets['openai_api_key'], secrets['openai_api_free_hosted_url']
class FileManager:
@staticmethod
@ -158,7 +158,7 @@ def init_browser() -> webdriver.Chrome:
except Exception as e:
raise RuntimeError(f"Failed to initialize browser: {str(e)}")
def create_and_run_bot(email: str, password: str, parameters: dict, openai_api_key: str):
def create_and_run_bot(email, password, parameters, openai_api_key, openai_api_free_hosted_url):
try:
style_manager = StyleManager()
resume_generator = ResumeGenerator()
@ -175,7 +175,7 @@ def create_and_run_bot(email: str, password: str, parameters: dict, openai_api_k
browser = init_browser()
login_component = LinkedInAuthenticator(browser)
apply_component = LinkedInJobManager(browser)
gpt_answerer_component = GPTAnswerer(openai_api_key)
gpt_answerer_component = GPTAnswerer(openai_api_key, openai_api_free_hosted_url)
bot = LinkedInBotFacade(login_component, apply_component)
bot.set_secrets(email, password)
bot.set_job_application_profile_and_resume(job_application_profile_object, resume_object)
@ -197,12 +197,12 @@ def main(resume: Path = None):
secrets_file, config_file, plain_text_resume_file, output_folder = FileManager.validate_data_folder(data_folder)
parameters = ConfigValidator.validate_config(config_file)
email, password, openai_api_key = ConfigValidator.validate_secrets(secrets_file)
email, password, openai_api_key, openai_api_free_hosted_url = ConfigValidator.validate_secrets(secrets_file)
parameters['uploads'] = FileManager.file_paths_to_dict(resume, plain_text_resume_file)
parameters['outputFileDirectory'] = output_folder
create_and_run_bot(email, password, parameters, openai_api_key)
create_and_run_bot(email, password, parameters, openai_api_key, openai_api_free_hosted_url)
except ConfigError as ce:
print(f"Configuration error: {str(ce)}")
print("Refer to the configuration guide for troubleshooting: https://github.com/feder-cr/LinkedIn_AIHawk_automatic_job_application/blob/main/readme.md#configuration")

View File

@ -3,7 +3,7 @@ import os
import re
import textwrap
from datetime import datetime
from typing import Dict, List
from typing import Dict, List, Union
from pathlib import Path
from dotenv import load_dotenv
from langchain_core.messages.ai import AIMessage
@ -11,6 +11,7 @@ from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompt_values import StringPromptValue
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from Levenshtein import distance
import src.strings as strings
@ -20,7 +21,7 @@ load_dotenv()
class LLMLogger:
def __init__(self, llm: ChatOpenAI):
def __init__(self, llm: Union[ChatOpenAI, ChatOllama]):
self.llm = llm
@staticmethod
@ -78,12 +79,12 @@ class LLMLogger:
class LoggerChatModel:
def __init__(self, llm: ChatOpenAI):
def __init__(self, llm: Union[ChatOpenAI, ChatOllama]):
self.llm = llm
def __call__(self, messages: List[Dict[str, str]]) -> str:
# Call the LLM with the provided messages and log the response.
reply = self.llm(messages)
reply = self.llm.invoke(messages)
parsed_reply = self.parse_llmresult(reply)
LLMLogger.log_request(prompts=messages, parsed_reply=parsed_reply)
return reply
@ -113,10 +114,18 @@ class LoggerChatModel:
class GPTAnswerer:
def __init__(self, openai_api_key):
self.llm_cheap = LoggerChatModel(
ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4)
)
def __init__(self, openai_api_key, openai_api_free_hosted_url):
if openai_api_key == "":
print('Using locally hosted mistral:v0.3')
self.llm_model = ChatOllama(model = "mistral:v0.3", temperature = 0.4, num_predict = 256)
elif openai_api_key == "freehosted":
print('Using free hosted gpt-4o-mini')
self.llm_model = ChatOpenAI(model_name="gpt-4o-mini", openai_api_key="anything", temperature=0.4,
base_url=openai_api_free_hosted_url)
else:
print("Using gpt-4o-mini")
self.llm_model = ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4)
self.llm_cheap = LoggerChatModel(self.llm_model)
@property
def job_description(self):
return self.job.description