import os

from openai import OpenAI

# Initialise the client; the API key is read from the OPENAI_API_KEY
# environment variable.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


def embed_question_openai(texts, model="text-embedding-3-small"):
    """Embed text with the OpenAI embeddings endpoint.

    Note: only the embedding of the first input is returned, so pass a
    single string (or a one-element list).
    """
    response = client.embeddings.create(
        input=texts,
        model=model,
    )
    return response.data[0].embedding
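
# Usage sketch (illustrative, not part of the original code): embed a single
# question and inspect the resulting vector. Assumes OPENAI_API_KEY is set.
# vector = embed_question_openai("What does Hooke's law state?")
# print(len(vector))  # 1536 dimensions for text-embedding-3-small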


def openai_domain_specific_answer_generation(system_prompt, question, model="gpt-4o-mini", temperature=0.3, top_p=0.1):
    """Generate a precise, domain-specific answer to a question."""
    prompt = f"""
    Question:
    {question}

    Answer (provide a precise, domain-specific response):
    """
    # Low temperature and top_p keep the answer focused and deterministic.
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ],
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=0.1,
        presence_penalty=0.0,
    )
    return response.choices[0].message.content


def openai_context_integration(system_prompt, query, expert_answer, retrieved_context, model="gpt-4o-mini", temperature=0.3, top_p=0.3):
    """Fuse the direct expert answer with retrieved context into a final answer."""
    prompt = f"""
    Question:
    {query}

    Direct Answer:
    {expert_answer}

    Retrieved Context:
    {retrieved_context}

    Final Answer:
    """
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ],
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=0.1,
        presence_penalty=0.0,
    )
    return response.choices[0].message.content
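

# Minimal end-to-end sketch (illustrative: the prompts and context below are
# placeholders, not from the original source). Step 1 asks the model for a
# direct answer; step 2 folds in retrieved context, which in a real pipeline
# would come from a vector search over embed_question_openai() vectors.
if __name__ == "__main__":
    system_prompt = "You are a domain expert. Answer precisely and concisely."
    question = "What does Hooke's law state?"

    expert_answer = openai_domain_specific_answer_generation(system_prompt, question)

    retrieved_context = "Hooke's law: in the elastic regime, stress is proportional to strain."
    final_answer = openai_context_integration(system_prompt, question, expert_answer, retrieved_context)
    print(final_answer)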