Merge pull request #1 from parth-20-07/bug/rate-limit-error

Add Sleep Timer for OpenAI Rate Limit Error
Parth Patel 2024-09-03 11:54:08 -04:00
commit c06801ddba
2 changed files with 75 additions and 23 deletions

.gitignore

@@ -11,4 +11,5 @@ generated_cv*
 .vscode
 chrome_profile
 answers.json
+virtual/*
 data*

@@ -3,6 +3,7 @@ import os
 import re
 import textwrap
 from datetime import datetime
+import time
 from typing import Dict, List
 from pathlib import Path
 from dotenv import load_dotenv
@@ -12,14 +13,14 @@ from langchain_core.prompt_values import StringPromptValue
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
 from Levenshtein import distance
+from openai import RateLimitError  # Make sure to import the necessary exception class

 import src.strings as strings

 load_dotenv()


 class LLMLogger:
     def __init__(self, llm: ChatOpenAI):
         self.llm = llm
@@ -83,7 +84,27 @@ class LoggerChatModel:
     def __call__(self, messages: List[Dict[str, str]]) -> str:
         # Call the LLM with the provided messages and log the response.
-        reply = self.llm(messages)
+        while True:
+            try:
+                # Try to get the reply
+                reply = self.llm(messages)
+                break  # Break the loop if successful
+            except RateLimitError as e:
+                # Handle the rate limit error specifically
+                if "429" in str(e):
+                    # print(f"Rate Limit Error: {e}")
+                    print("Sleeping for 450 seconds before retrying...")
+                    time.sleep(450)  # Sleep for 7 minutes and 30 seconds
+                else:
+                    # Handle any other RateLimitError that doesn't match the 429 code
+                    print(f"An unexpected RateLimitError occurred: {e}")
+                    reply = None
+                    break  # Exit loop or handle accordingly
+            except Exception as e:
+                # Handle other generic exceptions
+                print(f"An unexpected error occurred: {e}")
+                reply = None
+                break  # Exit loop or handle accordingly
+
         parsed_reply = self.parse_llmresult(reply)
         LLMLogger.log_request(prompts=messages, parsed_reply=parsed_reply)
         return reply
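Two caveats with the loop as committed: the fixed 450-second sleep retries at the same cadence no matter how long the provider actually needs, and the `reply = None` branches fall through to `parse_llmresult(reply)`, which cannot handle `None`. A common alternative is exponential backoff with a retry cap that re-raises once retries are exhausted. The sketch below is illustrative and not part of this commit; `call_with_backoff` and its parameters are made-up names, and `llm` is assumed to be callable like the wrapped `ChatOpenAI` instance above.

```python
import random
import time

from openai import RateLimitError


def call_with_backoff(llm, messages, max_retries: int = 5, base_delay: float = 2.0):
    # Hypothetical helper (not in this commit): retry with exponential
    # backoff plus jitter instead of a fixed 450-second sleep.
    for attempt in range(max_retries):
        try:
            return llm(messages)
        except RateLimitError:
            delay = base_delay * (2 ** attempt) + random.uniform(0, 1)
            print(f"Rate limited; retrying in {delay:.1f}s "
                  f"(attempt {attempt + 1}/{max_retries})")
            time.sleep(delay)
    # Final attempt: let a persistent failure propagate rather than
    # returning None, so the caller never hands None to parse_llmresult.
    return llm(messages)
```

Re-raising keeps a permanent failure visible at the call site instead of surfacing later as an `AttributeError` on `None`.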
@@ -114,9 +135,8 @@ class LoggerChatModel:
 class GPTAnswerer:
     def __init__(self, openai_api_key):
-        self.llm_cheap = LoggerChatModel(
-            ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4)
-        )
+        self.llm_cheap = LoggerChatModel(ChatOpenAI(model_name="gpt-4o-mini", openai_api_key=openai_api_key, temperature=0.4))

     @property
     def job_description(self):
         return self.job.description
@@ -144,11 +164,13 @@ class GPTAnswerer:
     def set_job(self, job):
         self.job = job
-        self.job.set_summarize_job_description(self.summarize_job_description(self.job.description))
+        self.job.set_summarize_job_description(
+            self.summarize_job_description(self.job.description)
+        )

     def set_job_application_profile(self, job_application_profile):
         self.job_application_profile = job_application_profile

     def summarize_job_description(self, text: str) -> str:
         strings.summarize_prompt_template = self._preprocess_template_string(
             strings.summarize_prompt_template
@@ -157,23 +179,33 @@ class GPTAnswerer:
         chain = prompt | self.llm_cheap | StrOutputParser()
         output = chain.invoke({"text": text})
         return output

     def _create_chain(self, template: str):
         prompt = ChatPromptTemplate.from_template(template)
         return prompt | self.llm_cheap | StrOutputParser()

     def answer_question_textual_wide_range(self, question: str) -> str:
         # Define chains for each section of the resume
         chains = {
-            "personal_information": self._create_chain(strings.personal_information_template),
-            "self_identification": self._create_chain(strings.self_identification_template),
-            "legal_authorization": self._create_chain(strings.legal_authorization_template),
+            "personal_information": self._create_chain(
+                strings.personal_information_template
+            ),
+            "self_identification": self._create_chain(
+                strings.self_identification_template
+            ),
+            "legal_authorization": self._create_chain(
+                strings.legal_authorization_template
+            ),
             "work_preferences": self._create_chain(strings.work_preferences_template),
             "education_details": self._create_chain(strings.education_details_template),
-            "experience_details": self._create_chain(strings.experience_details_template),
+            "experience_details": self._create_chain(
+                strings.experience_details_template
+            ),
             "projects": self._create_chain(strings.projects_template),
             "availability": self._create_chain(strings.availability_template),
-            "salary_expectations": self._create_chain(strings.salary_expectations_template),
+            "salary_expectations": self._create_chain(
+                strings.salary_expectations_template
+            ),
             "certifications": self._create_chain(strings.certifications_template),
             "languages": self._create_chain(strings.languages_template),
             "interests": self._create_chain(strings.interests_template),
@@ -273,21 +305,38 @@ class GPTAnswerer:
         section_name = output.lower().replace(" ", "_")
         if section_name == "cover_letter":
             chain = chains.get(section_name)
-            output = chain.invoke({"resume": self.resume, "job_description": self.job_description})
+            output = chain.invoke(
+                {"resume": self.resume, "job_description": self.job_description}
+            )
             return output
-        resume_section = getattr(self.resume, section_name, None) or getattr(self.job_application_profile, section_name, None)
+        resume_section = getattr(self.resume, section_name, None) or getattr(
+            self.job_application_profile, section_name, None
+        )
         if resume_section is None:
-            raise ValueError(f"Section '{section_name}' not found in either resume or job_application_profile.")
+            raise ValueError(
+                f"Section '{section_name}' not found in either resume or job_application_profile."
+            )
         chain = chains.get(section_name)
         if chain is None:
             raise ValueError(f"Chain not defined for section '{section_name}'")
         return chain.invoke({"resume_section": resume_section, "question": question})
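One subtlety in the `getattr(...) or getattr(...)` fallback above: it keys on truthiness, so a section that exists on the resume but is empty (an empty list or string) silently falls through to `job_application_profile`. If that distinction ever matters, a sentinel-based lookup keeps "absent" and "empty" separate. Illustrative sketch only; `resolve_section` is not a function in this codebase:

```python
def resolve_section(resume, profile, section_name: str):
    # Hypothetical stricter variant of the `a or b` fallback: only fall
    # back when the attribute is truly missing, not merely falsy.
    missing = object()  # unique sentinel no real section can equal
    value = getattr(resume, section_name, missing)
    if value is missing:
        return getattr(profile, section_name, None)
    return value
```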
-    def answer_question_numeric(self, question: str, default_experience: int = 3) -> int:
-        func_template = self._preprocess_template_string(strings.numeric_question_template)
+    def answer_question_numeric(
+        self, question: str, default_experience: int = 3
+    ) -> int:
+        func_template = self._preprocess_template_string(
+            strings.numeric_question_template
+        )
         prompt = ChatPromptTemplate.from_template(func_template)
         chain = prompt | self.llm_cheap | StrOutputParser()
-        output_str = chain.invoke({"resume_educations": self.resume.education_details,"resume_jobs": self.resume.experience_details,"resume_projects": self.resume.projects , "question": question})
+        output_str = chain.invoke(
+            {
+                "resume_educations": self.resume.education_details,
+                "resume_jobs": self.resume.experience_details,
+                "resume_projects": self.resume.projects,
+                "question": question,
+            }
+        )
         try:
             output = self.extract_number_from_string(output_str)
         except ValueError:
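`extract_number_from_string` raises `ValueError` when the model reply contains no digits, which the `except ValueError` above converts into the `default_experience` fallback. Its body is outside this diff; a helper of this name plausibly looks like the following sketch:

```python
import re


def extract_number_from_string(output_str: str) -> int:
    # Hypothetical reconstruction (the real body is not shown in this diff):
    # return the first run of digits in a reply such as "3 years".
    match = re.search(r"\d+", output_str)
    if match:
        return int(match.group())
    raise ValueError(f"No number found in string: {output_str!r}")
```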
@@ -305,10 +354,12 @@ class GPTAnswerer:
         func_template = self._preprocess_template_string(strings.options_template)
         prompt = ChatPromptTemplate.from_template(func_template)
         chain = prompt | self.llm_cheap | StrOutputParser()
-        output_str = chain.invoke({"resume": self.resume, "question": question, "options": options})
+        output_str = chain.invoke(
+            {"resume": self.resume, "question": question, "options": options}
+        )
         best_option = self.find_best_match(output_str, options)
         return best_option
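`find_best_match` is what the file-level `from Levenshtein import distance` import supports: the model's free-text answer is snapped to the closest allowed option by edit distance. Its body is outside this diff; a sketch consistent with that import:

```python
from typing import List

from Levenshtein import distance


def find_best_match(text: str, options: List[str]) -> str:
    # Hypothetical reconstruction (real body not shown in this diff): pick
    # the option with the smallest Levenshtein distance to the model output.
    return min(options, key=lambda option: distance(text.lower(), option.lower()))
```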
     def resume_or_cover(self, phrase: str) -> str:
         # Define the prompt template
         prompt_template = """