app.py
CHANGED
@@ -47,34 +47,36 @@ llm = ChatOpenAI(
|
|
47 |
max_tokens=max_tokens,
|
48 |
api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
49 |
)
|
|
|
50 |
developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
|
|
|
|
|
51 |
|
52 |
def travel_recommender(state):
    """Generate a travel recommendation and append it to the conversation state.

    Reads the latest user message as the user's preferences, asks the
    structured-output LLM for a ``destination`` + ``explanation``, formats the
    result as display text, appends it as an AIMessage, and increments the
    iteration counter consumed by the quality gate.

    Fixes: the prompt previously referred to "la siguiente base de datos"
    without ever including it, ``state["iterations"]`` was never incremented
    (so ``quality_gate_condition`` could loop forever), and an encoding
    artifact in the output string is repaired ("Razón").
    """
    user_requirements = state["messages"][-1].content
    system_prompt = f"""
    Eres un experto en recomendaciones de viajes.
    Con base en las siguientes preferencias del usuario: {user_requirements},
    selecciona el mejor destino de la siguiente base de datos: {travel_database}.
    Responde en JSON con la clave `destination` y `explanation`.
    """
    messages = [
        SystemMessage(content=system_prompt),
    ]

    # 1. Invoke the structured-output LLM (returns a GenerateRecommendation).
    recommendation_obj = developer_structure_llm.invoke(messages)

    # 2. Convert the structured object into formatted display text.
    text_output = (
        f"Destino recomendado: {recommendation_obj.destination}\n"
        f"Razón: {recommendation_obj.explanation}"
    )

    # 3. Append the answer to the state for downstream nodes.
    state["messages"].append(AIMessage(content=text_output))

    # Without this increment the quality gate never reaches num_iterations.
    state["iterations"] += 1

    # 4. Return the modified state.
    return state
|
79 |
|
80 |
def recommendation_review(state):
|
@@ -105,12 +107,12 @@ def final_recommendation(state):
|
|
105 |
messages = system_messages + human_messages + ai_messages
|
106 |
final_message = llm.invoke(messages)
|
107 |
|
108 |
-
#
|
109 |
state["final_recommendation"] = final_message.content
|
110 |
state["messages"].append(AIMessage(content=f"Final Recommendation: {final_message.content}"))
|
111 |
return state
|
112 |
|
113 |
-
# Función para definir la condición de bifurcación del grafo
|
114 |
def quality_gate_condition(state) -> Literal["travel_recommender", "final_recommendation"]:
|
115 |
if state["iterations"] >= num_iterations:
|
116 |
return "final_recommendation"
|
@@ -133,20 +135,25 @@ builder.add_conditional_edges("recommendation_review", quality_gate_condition)
|
|
133 |
|
134 |
graph = builder.compile()

def run_graph(user_input: str) -> str:
    """Run the recommendation graph on the user's input and return the final text.

    Builds a fresh initial state (one HumanMessage, zeroed quality and
    iteration counters), invokes the compiled graph, and returns the content
    of the last AIMessage produced. Fixes: the fallback string contained
    encoding artifacts ("gener贸"/"recomendaci贸n"), repaired to proper Spanish.
    """
    initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
    final_state = graph.invoke(initial_state)

    # The final recommendation is carried by the last AIMessage in the flow.
    final_messages = [msg for msg in final_state["messages"] if isinstance(msg, AIMessage)]
    if not final_messages:
        return "No se generó una recomendación final."

    return final_messages[-1].content
|
149 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
150 |
|
151 |
# Launch the Gradio UI only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()
|
|
|
|
47 |
max_tokens=max_tokens,
|
48 |
api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
49 |
)
|
50 |
+
# LLM that generates the recommendation (structured JSON output,
# converted to display text later by the recommender node).
developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
# LLM that reviews/scores the generated recommendation.
reviewer_structure_llm = llm.with_structured_output(RecommendationQualityScore, method="json_mode")
|
54 |
|
55 |
def travel_recommender(state):
    """Generate a travel recommendation and append it to the conversation state.

    Reads the latest user message as the user's preferences, asks the
    structured-output LLM for a ``destination`` + ``explanation`` drawn from
    ``travel_database``, formats the result as display text, appends it as an
    AIMessage, and increments the iteration counter consumed by the quality
    gate. Fix: repaired an encoding artifact in the output string ("Razón").
    """
    user_requirements = state["messages"][-1].content
    system_prompt = f"""
    Eres un experto en recomendaciones de viajes.
    Con base en las siguientes preferencias del usuario: {user_requirements},
    selecciona el mejor destino de la siguiente base de datos: {travel_database}.
    Responde en JSON con la clave `destination` y `explanation`.
    """
    messages = [
        SystemMessage(content=system_prompt),
        # More messages can be added here if needed.
    ]

    # 1. Invoke the structured-output LLM (returns a GenerateRecommendation).
    recommendation_obj = developer_structure_llm.invoke(messages)

    # 2. Convert the structured object into formatted display text.
    text_output = (
        f"Destino recomendado: {recommendation_obj.destination}\n"
        f"Razón: {recommendation_obj.explanation}"
    )

    # 3. Append the answer to the state for downstream nodes.
    state["messages"].append(AIMessage(content=text_output))
    # Drives quality_gate_condition's iterations >= num_iterations check.
    state["iterations"] += 1

    return state
|
81 |
|
82 |
def recommendation_review(state):
|
|
|
107 |
messages = system_messages + human_messages + ai_messages
|
108 |
final_message = llm.invoke(messages)
|
109 |
|
110 |
+
# Guarda la recomendación final en el estado
|
111 |
state["final_recommendation"] = final_message.content
|
112 |
state["messages"].append(AIMessage(content=f"Final Recommendation: {final_message.content}"))
|
113 |
return state
|
114 |
|
115 |
+
# Función para definir la condición de bifurcación del grafo
|
116 |
def quality_gate_condition(state) -> Literal["travel_recommender", "final_recommendation"]:
|
117 |
if state["iterations"] >= num_iterations:
|
118 |
return "final_recommendation"
|
|
|
135 |
|
136 |
graph = builder.compile()

# Runs the graph on the user's input and returns the result as text.
def run_graph(user_input: str) -> str:
    """Execute the compiled graph for one user query.

    Builds a fresh initial state (one HumanMessage, zeroed quality and
    iteration counters), invokes the graph, and returns the content of the
    last AIMessage produced. Fix: the fallback string contained encoding
    artifacts ("gener贸"/"recomendaci贸n"), repaired to proper Spanish.
    """
    initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
    final_state = graph.invoke(initial_state)

    # The final recommendation is carried by the last AIMessage in the flow.
    final_messages = [msg for msg in final_state["messages"] if isinstance(msg, AIMessage)]
    if not final_messages:
        return "No se generó una recomendación final."

    return final_messages[-1].content
|
148 |
|
149 |
+
# Gradio UI: one textbox in (travel preferences), one textbox out (final
# recommendation). Fix: user-visible labels contained encoding artifacts
# ("Recomendaci贸n"), repaired to "Recomendación".
iface = gr.Interface(
    fn=run_graph,
    inputs=gr.Textbox(label="Ingrese sus preferencias de viaje"),
    outputs=gr.Textbox(label="Recomendación Final"),
    title="Sistema de Recomendación de Viajes",
)
|
156 |
|
157 |
# Launch the Gradio UI only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()
|
159 |
+
|