# NOTE(review): stripped non-Python extraction artifacts (file-size line,
# git-blame commit hashes, line-number gutter) that preceded the imports.
import os
from dotenv import load_dotenv
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from typing import TypedDict, Annotated, Literal
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
import gradio as gr
# Load environment variables (API keys) from a local .env file.
load_dotenv()
# Configuration for the recommend -> review loop.
max_tokens = 2000  # token budget per LLM call
num_iterations = 2  # max recommendation attempts before forcing a final answer
quality_threshold = 8  # review score (1-10) required to accept a recommendation
# Simulated database of tourist destinations, interpolated into the recommender prompt.
travel_database = {
    "paris": {"destination": "Paris", "price": 1500, "features": ["romantic", "cultural", "historic"]},
    "bali": {"destination": "Bali", "price": 1200, "features": ["beach", "relaxing", "adventurous"]},
    "new_york": {"destination": "New York", "price": 2000, "features": ["urban", "shopping", "nightlife"]},
    "tokyo": {"destination": "Tokyo", "price": 1800, "features": ["modern", "cultural", "tech-savvy"]},
}
# Structured output schema for the recommender node.
class GenerateRecommendation(BaseModel):
    """Destination recommendation returned by the recommender LLM."""

    # FIX: repaired mojibake (UTF-8 text decoded as GBK) in the field
    # descriptions; these strings reach the LLM through the JSON schema.
    destination: str = Field(description="El destino turístico recomendado")
    explanation: str = Field(description="Explicación breve de la recomendación")
# Structured output schema for the reviewer node.
class RecommendationQualityScore(BaseModel):
    """Quality verdict returned by the reviewer LLM."""

    # FIX: repaired mojibake (UTF-8 text decoded as GBK) in the field
    # descriptions; these strings reach the LLM through the JSON schema.
    score: int = Field(description="Puntuación de la recomendación entre 1-10")
    comment: str = Field(description="Comentario sobre la calidad de la recomendación")
# Shared state threaded through every graph node.
class GraphState(TypedDict):
    """Graph state.

    messages: conversation history; the ``add_messages`` reducer merges node
        updates into the list instead of overwriting it.
    quality: latest review score (1-10); supplied as 0 in the initial state.
    iterations: recommendation attempts made so far; supplied as 0 initially.
    """

    messages: Annotated[list, add_messages]
    # FIX: the original used ``Annotated[int, 0]`` with a comment claiming the
    # metadata was an "initial value". In LangGraph, Annotated metadata is a
    # reducer hook, not a default — initial values come from the dict passed
    # to ``graph.invoke``. Plain ``int`` gives the intended last-value channel.
    quality: int
    iterations: int
# Graph builder over the shared state schema.
builder = StateGraph(GraphState)

# Shared chat model for every node.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
    max_tokens=max_tokens,
    # FIX: ChatOpenAI (no base_url override) calls the OpenAI API, which
    # requires OPENAI_API_KEY. The original read HUGGINGFACEHUB_API_TOKEN,
    # which OpenAI would reject; it is kept only as a backward-compatible
    # fallback for environments that stored the key under that name.
    api_key=os.getenv("OPENAI_API_KEY") or os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)

# LLM wrapper that parses responses into GenerateRecommendation objects.
# json_mode requires the prompt to mention JSON, which the node prompts do.
developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
# LLM wrapper that parses responses into RecommendationQualityScore objects.
reviewer_structure_llm = llm.with_structured_output(RecommendationQualityScore, method="json_mode")
def travel_recommender(state):
    """Recommend a destination from ``travel_database`` for the user's request.

    Reads the most recent message as the user's preferences, asks the
    structured LLM for a destination + explanation, appends the formatted
    answer to the conversation, and increments the attempt counter.
    """
    user_requirements = state["messages"][-1].content
    system_prompt = f"""
    Eres un experto en recomendaciones de viajes.
    Con base en las siguientes preferencias del usuario: {user_requirements},
    selecciona el mejor destino de la siguiente base de datos: {travel_database}.
    Responde en JSON con la clave `destination` y `explanation`.
    """
    messages = [SystemMessage(content=system_prompt)]
    # Structured call: the response is parsed into a GenerateRecommendation.
    recommendation_obj = developer_structure_llm.invoke(messages)
    # FIX: repaired mojibake in the user-visible "Razón" label.
    text_output = (
        f"Destino recomendado: {recommendation_obj.destination}\n"
        f"Razón: {recommendation_obj.explanation}"
    )
    # Record the answer so the reviewer node (and the final answer) can see it.
    state["messages"].append(AIMessage(content=text_output))
    # Count attempts so quality_gate_condition can cap the retry loop.
    state["iterations"] += 1
    return state
def recommendation_review(state):
    """Score the latest recommendation on a 1-10 quality scale.

    Sends the conversation so far to the reviewer LLM, appends the structured
    review as a new AI message, and stores the numeric score in ``quality``
    for the quality gate.
    """
    # FIX: repaired mojibake (UTF-8 text decoded as GBK) in the reviewer prompt.
    system_prompt = """
    Eres un revisor de recomendaciones con altos estándares.
    Revisa la recomendación proporcionada y asigna una puntuación de calidad entre 1-10.
    Evalúa la relevancia, precisión y alineación con las necesidades del cliente.
    Responde en JSON con las claves `score` y `comment`.
    """
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    messages = [SystemMessage(content=system_prompt)] + human_messages + ai_messages
    review = reviewer_structure_llm.invoke(messages)
    # Keep the review in the transcript; the raw score drives the quality gate.
    review_comment = f"Review Score: {review.score}\nComment: {review.comment}"
    state["messages"].append(AIMessage(content=review_comment))
    state["quality"] = review.score
    return state
def final_recommendation(state):
    """Produce the final user-facing answer from the full conversation."""
    # FIX: repaired mojibake in the Spanish prompt string.
    system_prompt = "Revisa la recomendación final y proporciona una respuesta final para el usuario."
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    messages = [SystemMessage(content=system_prompt)] + human_messages + ai_messages
    final_message = llm.invoke(messages)
    # FIX: the original also wrote state["final_recommendation"], but that key
    # is not declared in GraphState, so LangGraph either drops it or raises
    # InvalidUpdateError depending on version. The answer lives in the message
    # list, which is exactly what run_graph reads back.
    state["messages"].append(AIMessage(content=f"Final Recommendation: {final_message.content}"))
    return state
# Routing condition evaluated after each review pass.
def quality_gate_condition(state) -> Literal["travel_recommender", "final_recommendation"]:
    """Decide whether to retry the recommender or emit the final answer.

    Finish when the attempt budget is exhausted or the review score meets
    the quality threshold; otherwise loop back for another recommendation.
    """
    out_of_budget = state["iterations"] >= num_iterations
    good_enough = state["quality"] >= quality_threshold
    if out_of_budget or good_enough:
        return "final_recommendation"
    return "travel_recommender"
# Assemble the graph: recommender -> reviewer -> (loop back | finalize).
for _name, _fn in (
    ("travel_recommender", travel_recommender),
    ("recommendation_review", recommendation_review),
    ("final_recommendation", final_recommendation),
):
    builder.add_node(_name, _fn)

builder.add_edge(START, "travel_recommender")
builder.add_edge("travel_recommender", "recommendation_review")
# After each review, quality_gate_condition picks the next node by name.
builder.add_conditional_edges("recommendation_review", quality_gate_condition)
builder.add_edge("final_recommendation", END)

graph = builder.compile()
def run_graph(user_input: str) -> str:
    """Run the recommendation graph on *user_input* and return the answer text.

    Seeds the state with the user's message plus zeroed counters, invokes the
    compiled graph, and returns the content of the last AI message produced.
    """
    initial_state = {
        "messages": [HumanMessage(content=user_input)],
        "quality": 0,
        "iterations": 0,
    }
    final_state = graph.invoke(initial_state)
    # Scan backwards: the final answer is the most recent AI message.
    for msg in reversed(final_state["messages"]):
        if isinstance(msg, AIMessage):
            return msg.content
    # FIX: repaired mojibake in the user-facing fallback string.
    return "No se generó una recomendación final."
# Gradio UI: one textbox in (travel preferences), one textbox out (answer).
# FIX: repaired mojibake in the user-facing label and title strings.
iface = gr.Interface(
    fn=run_graph,
    inputs=gr.Textbox(label="Ingrese sus preferencias de viaje"),
    outputs=gr.Textbox(label="Recomendación Final"),
    title="Sistema de Recomendación de Viajes",
)

if __name__ == "__main__":
    iface.launch()