Use gpt-4o-mini as the chat completion model
This commit is contained in:
parent
67543499ab
commit
369c23791d
@ -80,22 +80,12 @@ def generate_yaml_from_resume(resume_text: str, schema: Dict[str, Any], api_key:
|
||||
Enclose your response in <resume_yaml> tags. Only include the YAML content within these tags, without any additional text or code block markers.
|
||||
"""
|
||||
|
||||
model = "gpt-3.5-turbo-16k" # This model has a 16k token limit
|
||||
tokens = num_tokens_from_string(prompt, model)
|
||||
max_tokens = min(16385 - tokens, 4000) # Ensure we don't exceed model's limit
|
||||
|
||||
if tokens > 16385:
|
||||
print(f"Warning: The input exceeds the model's context length. Tokens: {tokens}")
|
||||
|
||||
response = client.chat.completions.create(
|
||||
model=model,
|
||||
model="gpt-4o-mini",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful assistant that generates structured YAML content from resume files, paying close attention to format requirements and schema structure."},
|
||||
{"role": "user", "content": prompt}
|
||||
],
|
||||
max_tokens=max_tokens,
|
||||
n=1,
|
||||
stop=None,
|
||||
temperature=0.5,
|
||||
)
|
||||
|
||||
@ -107,7 +97,6 @@ def generate_yaml_from_resume(resume_text: str, schema: Dict[str, Any], api_key:
|
||||
return match.group(1).strip()
|
||||
else:
|
||||
raise ValueError("YAML content not found in the expected format")
|
||||
|
||||
def save_yaml(data: str, output_file: str):
    """Persist an already-serialized YAML string to *output_file*.

    The target file is opened in text mode and truncated; ``data`` is
    written verbatim (no re-serialization is performed).
    """
    with open(output_file, 'w') as out:
        out.write(data)
|
||||
|
Loading…
Reference in New Issue
Block a user