File size: 3,274 Bytes
8c4b14a
 
 
f278359
8c4b14a
 
 
 
f278359
8c4b14a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ac7eacd
8c4b14a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b987573
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import streamlit as st
from groq import Groq
from typing import List, Optional
from dotenv import load_dotenv
import json, os
from pydantic import BaseModel
from dspy_inference import get_expanded_query_and_topic

load_dotenv()

# Groq client; expects GROQ_API_KEY in the environment (loaded from .env above).
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
USER_AVATAR = "👤"
BOT_AVATAR = "🤖"

# Single source of truth for the opening assistant message, so both state
# lists start out identical instead of repeating the literal.
_GREETING = {"role": "assistant", "content": "Hi, How can I help you today?"}

# "messages" holds what the chat UI renders; "conversation_state" holds the
# raw history that is sent to the LLM. Initialize each once per session.
if "messages" not in st.session_state:
    st.session_state.messages = [dict(_GREETING)]
if "conversation_state" not in st.session_state:
    st.session_state["conversation_state"] = [dict(_GREETING)]

def main():
    """Streamlit chat loop: expand the user's query and tag its topic via DSPy,
    then answer with a Groq-hosted LLM, streaming the reply into the chat UI.

    Reads/writes ``st.session_state['messages']`` (rendered history) and
    ``st.session_state['conversation_state']`` (history sent to the model).
    """
    st.title("Query expansion and tagging")

    # Streamlit reruns the script on every interaction, so re-render the
    # full chat history each time.
    for message in st.session_state.messages:
        avatar = USER_AVATAR if message["role"] == "user" else BOT_AVATAR
        with st.chat_message(message["role"], avatar=avatar):
            st.markdown(message["content"])

    # Plain string — the original f-string had no placeholders.
    system_prompt = (
        "You are a helpful assistant who can answer any question that the user asks.\n"
    )
    if prompt := st.chat_input("User input"):
        # Echo the user's message and record it in both histories.
        st.chat_message("user", avatar=USER_AVATAR).markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
        conversation_context = st.session_state["conversation_state"]
        conversation_context.append({"role": "user", "content": prompt})

        # Use dspy to expand the query and get the topic.
        # NOTE(review): .expand is compared against the literal string "None",
        # so the DSPy module presumably returns that sentinel when no
        # expansion applies — confirm against dspy_inference.
        expanded_query = get_expanded_query_and_topic(prompt, conversation_context)
        has_expansion = expanded_query.expand != "None"

        # Assemble the LLM context: system prompt, full history, then the
        # query-analysis hints as extra system messages.
        context = [{"role": "system", "content": system_prompt}]
        context.extend(conversation_context)
        if has_expansion:
            context.append({"role": "system", "content": f"Expanded query: {expanded_query.expand}"})
        context.append({"role": "system", "content": f"Topic: {expanded_query.topic}"})

        response = client.chat.completions.create(
            messages=context,
            model="llama-3.1-405b-reasoning",
            temperature=0,  # deterministic output
            top_p=1,
            stop=None,
            stream=True,  # stream tokens so the UI can update incrementally
        )

        with st.chat_message("assistant", avatar=BOT_AVATAR):
            result = ""
            res_box = st.empty()
            # Accumulate streamed deltas and re-render the growing answer.
            for chunk in response:
                delta = chunk.choices[0].delta.content
                if delta:
                    result += delta
                    res_box.markdown(result)

            # Display expanded question and tags separately, below the answer.
            st.markdown("---")
            if has_expansion:
                st.markdown(f"**Expanded Question:** {expanded_query.expand}")
            else:
                st.markdown("**Expanded Question:** No expansion needed")
            st.markdown(f"**Topic:** {expanded_query.topic}")

        # Persist the completed answer in both histories.
        st.session_state.messages.append({"role": "assistant", "content": result})
        conversation_context.append({"role": "assistant", "content": result})

# Entry-point guard: only launch the app when run as a script, so the
# module can be imported without side effects.
if __name__ == "__main__":
    main()