ensure correct output language
Browse files
climateqa/engine/chains/intent_categorization.py
CHANGED
@@ -1,4 +1,3 @@
|
|
1 |
-
|
2 |
from langchain_core.pydantic_v1 import BaseModel, Field
|
3 |
from typing import List
|
4 |
from typing import Literal
|
@@ -44,7 +43,7 @@ def make_intent_categorization_chain(llm):
|
|
44 |
llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"IntentCategorizer"})
|
45 |
|
46 |
prompt = ChatPromptTemplate.from_messages([
|
47 |
-
("system", "You are a helpful assistant, you will analyze,
|
48 |
("user", "input: {input}")
|
49 |
])
|
50 |
|
@@ -58,11 +57,19 @@ def make_intent_categorization_node(llm):
|
|
58 |
|
59 |
def categorize_message(state):
|
60 |
print("---- Categorize_message ----")
|
|
|
61 |
|
62 |
output = categorization_chain.invoke({"input": state["user_input"]})
|
63 |
-
print(f"\n\
|
64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
output["query"] = state["user_input"]
|
|
|
66 |
return output
|
67 |
|
68 |
return categorize_message
|
|
|
|
|
1 |
from langchain_core.pydantic_v1 import BaseModel, Field
|
2 |
from typing import List
|
3 |
from typing import Literal
|
|
|
43 |
llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"IntentCategorizer"})
|
44 |
|
45 |
prompt = ChatPromptTemplate.from_messages([
|
46 |
+
("system", "You are a helpful assistant, you will analyze, detect the language, and categorize the user input message using the function provided. You MUST detect and return the language of the input message. Categorize the user input as ai ONLY if it is related to Artificial Intelligence, search if it is related to the environment, climate change, energy, biodiversity, nature, etc. and chitchat if it is just general conversation."),
|
47 |
("user", "input: {input}")
|
48 |
])
|
49 |
|
|
|
57 |
|
58 |
def categorize_message(state):
    """Categorize the user input and detect its language.

    Invokes the enclosing scope's `categorization_chain` on
    ``state["user_input"]`` and returns the chain output augmented with:

    - ``"language"``: forced to ``"English"`` when the chain fails to return
      a usable value (key missing, ``None``, or empty string)
    - ``"query"``: an echo of the raw user input

    Parameters
    ----------
    state : dict
        Graph state; must contain ``"user_input"``.
        (Assumes the chain output is dict-like — TODO confirm against
        the IntentCategorizer function-call schema.)

    Returns
    -------
    dict
        Chain output with ``"language"`` and ``"query"`` guaranteed present
        and non-empty.
    """
    print("---- Categorize_message ----")
    print(f"Input state: {state}")

    output = categorization_chain.invoke({"input": state["user_input"]})
    print(f"\n\nRaw output from categorization: {output}\n")

    # Guard against a missing OR falsy language ("", None): the previous
    # `"language" not in output` check let a present-but-empty value through,
    # which breaks downstream language-dependent answer generation.
    if not output.get("language"):
        print("WARNING: Language field missing from output, setting default to English")
        output["language"] = "English"
    else:
        print(f"Language detected: {output['language']}")

    output["query"] = state["user_input"]
    print(f"Final output: {output}")
    return output
|
74 |
|
75 |
return categorize_message
|
climateqa/engine/chains/standalone_question.py
CHANGED
@@ -18,7 +18,8 @@ def make_standalone_question_chain(llm):
|
|
18 |
("user", """Chat History: {chat_history}
|
19 |
User Question: {question}
|
20 |
|
21 |
-
Transform this into a standalone question:
|
|
|
22 |
])
|
23 |
|
24 |
chain = prompt | llm
|
@@ -29,6 +30,8 @@ def make_standalone_question_node(llm):
|
|
29 |
|
30 |
def transform_to_standalone(state):
|
31 |
chat_history = state.get("chat_history", "")
|
|
|
|
|
32 |
output = standalone_chain.invoke({
|
33 |
"chat_history": chat_history,
|
34 |
"question": state["user_input"]
|
|
|
18 |
("user", """Chat History: {chat_history}
|
19 |
User Question: {question}
|
20 |
|
21 |
+
Transform this into a standalone question:
|
22 |
+
Make sure to keep the original language of the question.""")
|
23 |
])
|
24 |
|
25 |
chain = prompt | llm
|
|
|
30 |
|
31 |
def transform_to_standalone(state):
|
32 |
chat_history = state.get("chat_history", "")
|
33 |
+
if chat_history == "":
|
34 |
+
return {}
|
35 |
output = standalone_chain.invoke({
|
36 |
"chat_history": chat_history,
|
37 |
"question": state["user_input"]
|
front/tabs/chat_interface.py
CHANGED
@@ -56,7 +56,7 @@ def create_chat_interface(tab):
|
|
56 |
)
|
57 |
with gr.Accordion("Click here for follow up questions examples", elem_id="follow-up-examples",open = False):
|
58 |
follow_up_examples_hidden = gr.Textbox(visible=False, elem_id="follow-up-hidden")
|
59 |
-
follow_up_examples = gr.Examples(examples=[], label="", inputs= [follow_up_examples_hidden], elem_id="follow-up-button", run_on_click=False)
|
60 |
|
61 |
with gr.Row(elem_id="input-message"):
|
62 |
|
|
|
56 |
)
|
57 |
with gr.Accordion("Click here for follow up questions examples", elem_id="follow-up-examples",open = False):
|
58 |
follow_up_examples_hidden = gr.Textbox(visible=False, elem_id="follow-up-hidden")
|
59 |
+
follow_up_examples = gr.Examples(examples=["What evidence do we have of climate change ?"], label="", inputs= [follow_up_examples_hidden], elem_id="follow-up-button", run_on_click=False)
|
60 |
|
61 |
with gr.Row(elem_id="input-message"):
|
62 |
|