import os
from openai import OpenAI

# from dotenv import load_dotenv
# load_dotenv()  # Uncomment to load OPENAI_API_KEY from a local .env file

#--------------------------------------------------------
# Initialize OpenAI client
#--------------------------------------------------------
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def embed_question_openai(texts, model="text-embedding-3-small"):
    """Embed a question and return its embedding vector.

    Note: the embeddings endpoint accepts a string or a list of strings,
    but this helper returns only the first embedding, so pass a single
    question at a time.
    """
    response = client.embeddings.create(
        input=texts,
        model=model
    )
    return response.data[0].embedding

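# Illustrative sketch (not part of the original pipeline): embeddings from
# embed_question_openai are typically compared with cosine similarity to rank
# retrieved passages. `cosine_similarity` below is a hypothetical helper added
# for illustration, not an OpenAI API call.
def cosine_similarity(a, b):
    """Cosine similarity between two embedding vectors, in plain Python."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)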

def openai_domain_specific_answer_generation(system_prompt, question, model="gpt-4o-mini", temperature=0.3, top_p=0.1):
    """Generate a precise, domain-specific answer to `question` under the given system prompt."""
    prompt = f"""
    Question:
    {question}

    Answer (provide a precise, domain-specific response):
    """
    
    response = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": system_prompt
            },
            {
                "role": "user",
                "content": prompt
            }
        ],
        temperature=temperature,      # Set low for deterministic and precise responses.
        top_p=top_p,                  # Focus on high-probability outputs to ensure accuracy.
        frequency_penalty=0.1,        # Reduce repetition of technical terms.
        presence_penalty=0.0          # Prevent introduction of unrelated ideas.
    )
    
    return response.choices[0].message.content

def openai_context_integration(system_prompt, query, expert_answer, retrieved_context, model="gpt-4o-mini", temperature=0.3, top_p=0.3):
    """Blend the direct expert answer with the retrieved context into a single final answer."""
    prompt = f"""
    Question:
    {query}

    Direct Answer:
    {expert_answer}

    Retrieved Context:
    {retrieved_context}

    Final Answer:
    """
    
    response = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": system_prompt
            },
            {
                "role": "user",
                "content": prompt
            }
        ],
        temperature=temperature,      # Maintain some flexibility for smooth blending.
        top_p=top_p,                  # Prioritize high-probability outputs to stay focused on the inputs.
        frequency_penalty=0.1,        # Mild penalty; still permits repetition needed for clarity.
        presence_penalty=0.0          # Neutral to avoid introducing unrelated ideas.
    )
    
    return response.choices[0].message.content
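

# Minimal usage sketch. Assumptions: OPENAI_API_KEY is set in the
# environment, and the system prompts, question, and retrieved context below
# are placeholders added for illustration, not values from the original code.
if __name__ == "__main__":
    question = "What is the CFL condition in explicit time integration?"

    # Embed the question (e.g., to retrieve context from a vector store).
    question_vec = embed_question_openai(question)
    print(f"Embedding length: {len(question_vec)}")

    # Step 1: get a direct, domain-specific expert answer.
    expert = openai_domain_specific_answer_generation(
        "You are an expert in numerical methods. Answer concisely and precisely.",
        question,
    )

    # Step 2: fuse the expert answer with retrieved context into a final answer.
    final = openai_context_integration(
        "Integrate the direct answer with the retrieved context; "
        "resolve any conflicts explicitly.",
        question,
        expert,
        "Placeholder retrieved context: course notes on stability of explicit schemes.",
    )
    print(final)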