# Debt-negotiation demo (Hugging Face Space):
# negmas SAO negotiation engine + Together LLM classifiers + Gradio chat UI.
from negmas import make_issue, SAOMechanism
from negmas.preferences import LinearAdditiveUtilityFunction as LUFun
from negmas.preferences.value_fun import AffineFun, LinearFun, IdentityFun
from negmas.sao import TimeBasedConcedingNegotiator
from together import Together
def run_debt_negotiation(min_debt, max_debt, min_payments, max_payments,
                         weight_debt, weight_payments,
                         total_debt_debtor_weight=1.0, num_payments_debtor_weight=1.0):
    """Run a simulated alternating-offers (SAO) negotiation between a debt
    collector and a debtor over two issues: total debt and number of payments.

    Args:
        min_debt, max_debt: bounds of the negotiable total debt.
        min_payments, max_payments: bounds of the negotiable number of payments.
        weight_debt, weight_payments: collector-side weights for each issue.
        total_debt_debtor_weight, num_payments_debtor_weight: debtor-side
            weights for each issue (default 1.0 each).

    Returns:
        The final state from ``session.run()``; its ``agreement`` attribute is
        the agreed (total_debt, num_payments) outcome, or None when the
        50-step session ends without agreement.
    """
    issues = [
        make_issue(name="total_debt", values=(min_debt, max_debt)),
        make_issue(name="num_payments", values=(min_payments, max_payments)),
    ]
    session = SAOMechanism(issues=issues, n_steps=50)

    # Collector prefers higher debt and more payments (increasing value funs).
    # Consistency fix: values are keyed by issue name, like the debtor's
    # (the original mixed a positional list here with a dict below).
    collector_utility = LUFun(
        values={
            "total_debt": IdentityFun(),
            "num_payments": LinearFun(1),
        },
        weights={
            "total_debt": weight_debt,
            "num_payments": weight_payments,
        },
        outcome_space=session.outcome_space,
    )
    # Debtor prefers lower debt and fewer payments (negative-slope value funs).
    debtor_utility = LUFun(
        values={
            "total_debt": AffineFun(-1),
            "num_payments": LinearFun(-1),
        },
        weights={
            "total_debt": total_debt_debtor_weight,
            "num_payments": num_payments_debtor_weight,
        },
        outcome_space=session.outcome_space,
    )
    # Consistency fix: both negotiators use the same `preferences=` keyword
    # (the original mixed `preferences=` and its alias `ufun=`).
    session.add(TimeBasedConcedingNegotiator(name="debtor"), preferences=debtor_utility)
    session.add(TimeBasedConcedingNegotiator(name="collector"), preferences=collector_utility)
    return session.run()
def offer_to_text(offer):
    """Render a mechanism state's current offer as Spanish text.

    The first issue value is scaled by 10,000 into a money amount; the second
    is the number of payments.
    """
    terms = offer.current_offer
    monto = terms[0] * 10_000
    plazo = terms[1]
    return f'Monto: {monto}, Plazo: {plazo}'
# Shared Together API client used by every Ticio instance below.
client = Together()
class Ticio():
    """Minimal chat wrapper around the Together completions API that keeps
    the full message history of one conversation."""

    def __init__(self, system_prompt, model = 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'):
        self.model = model
        # History starts with the system prompt; every turn is appended here.
        self.messages = [{'role': 'system', 'content': system_prompt}]

    def inference(self):
        """Send the accumulated history to the model, record the assistant
        reply in the history, and return the raw API response."""
        response = client.chat.completions.create(model=self.model, messages=self.messages)
        reply = response.choices[0].message.content
        self.messages.append({'role': 'assistant', 'content': reply})
        return response

    def add_message(self, content, role = 'user'):
        """Append a message (default role: user) and return the history."""
        self.messages.append({'role': role, 'content': content})
        return self.messages

    def last_inference(self):
        """Return the content of the most recent message in the history."""
        return self.messages[-1]['content']
# System prompt (in Spanish, read verbatim at runtime) for the turn
# classifier: it labels the user's reply as Aceptada (accepted), Plazo
# (disagrees with the term) and/or Monto (disagrees with the amount),
# returned as a comma-separated list.
system_prompt = """
Eres un bot que analiza las respuestas de una negociación por turnos, tu tarea es evaluar las respuesta del usuario a la oferta clasificarla en una de las siguientes opciones:
Aceptada: Si el usuario explicitamente dijo que quiere aceptarla
Plazo: si el mensaje del usuario menciona que esta en desacuerdo con el plazo
Monto: si el mensaje del usuario menciona que esta en desacuerdo con el monto
puedes clasificar el mensaje con varias categorias, todas separadas por coma.
Por ejemplo: ['Plazo', 'Monto']
solo responde con la lista de categorias separadas por coma.
"""
# Module-level classifier instance.
# NOTE(review): chatbot_response builds its own local Ticio(system_prompt)
# each call, so this instance appears unused — confirm before removing.
MessageEval = Ticio(system_prompt)
import gradio as gr  # NOTE(review): mid-file import; conventionally belongs at the top of the file

# Mutable negotiation state shared across Gradio callbacks.
mod_plazo = 0    # times the user complained about the payment term
mod_monto = 0    # times the user complained about the amount
mod_interes = 0  # interest complaints — the visible classifier prompt has no such category
current_offer = []  # latest offer; reassigned elsewhere (tuple / negotiation result)
# NOTE(review): exact duplicate of the Ticio class defined earlier in this
# file; this later definition rebinds the name with identical code, so it is
# a candidate for removal.
class Ticio():
    """Chat wrapper around the Together completions API that accumulates the
    message history of one conversation."""
    def __init__(self, system_prompt, model = 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'):
        self.model = model
        # History starts with the system prompt; all turns are appended here.
        self.messages = [{'role': 'system', 'content': system_prompt}]
    def inference(self):
        """Send the history to the model, append and return the reply."""
        response = client.chat.completions.create(
            model=self.model,
            messages=self.messages,
        )
        self.messages.append({"role": 'assistant', "content": response.choices[0].message.content})
        return response
    def add_message(self, content, role = 'user'):
        """Append a message (default role: user) and return the history."""
        self.messages.append({"role": role, "content": content})
        return self.messages
    def last_inference(self):
        """Return the content of the most recent message in the history."""
        return self.messages[-1]['content']
def evaluate_acceptance(offer, user_input):
    """Ask the LLM whether the user accepted the current offer.

    Args:
        offer: (amount, payments) acceptance thresholds — per the prompt, a
            user counter-offer is acceptable when amount >= offer[0] and
            payments <= offer[1].
        user_input: the user's latest chat message.

    Returns:
        The raw classifier answer; callers look for the sentinel words
        'pingüino' (user accepted), 'tesla' (accept the user's counter-offer)
        or 'ferrocarril' (not accepted).
    """
    system_prompt = f"""
Eres un bot que analiza las respuestas de una negociación por turnos, tu tarea es evaluar si el usuario acepto la oferta. Se considera aceptada la oferta cuando el usuario, explicitamente dice que la acepto (Con expresiones como: acepto, esta bien, ok)
Tambien puedes aceptar una oferta del usuario siempre y cuando la oferta sea:
Monto: Mayor o igual que {offer[0]}
Plazo: Menor o igual que {offer[1]}
Si el usuario acepto oferta fue aceptada: 'pingüino'
si vas aceptar la oferta del usuario(solo si se cumplen estrictamente las condiciones descritas arriba): 'tesla'
Si el usuario no acepto la oferta: 'ferrocarril'
"""
    # Fix: the local was named `eval`, shadowing the builtin.
    classifier = Ticio(system_prompt)
    classifier.add_message(user_input)
    classifier.inference()
    ans = classifier.last_inference()
    print(ans)  # debug trace of the raw classifier answer
    return ans
def initial_message(min_debt, max_debt, min_payments, max_payments,
                    weight_debt, weight_payments):
    """Reset the negotiation state and return the chatbot's opening message.

    Clears the complaint counters, records the opening offer, and returns a
    fresh Gradio chat history containing only the bot greeting.
    """
    global mod_plazo, mod_monto, mod_interes, current_offer
    mod_plazo = 0
    mod_monto = 0
    mod_interes = 0
    # Bug fix: the stored offer now matches the displayed one — the opening
    # offer is the collector's best case (maximum debt in minimum payments).
    # The original stored (max_debt, max_payments) while displaying min_payments.
    current_offer = (max_debt, min_payments)
    first_message = (
        f"Bot: ¡Hola! Estoy listo para ayudarte a negociar tu deuda.\n"
        f"\n👉 Mi oferta inicial es: **${max_debt}** en {min_payments} pagos"
    )
    return [[None, first_message]]  # clears the history and shows only the greeting
# Chatbot turn handler
def chatbot_response(user_input, min_debt, max_debt, min_payments, max_payments,
                     weight_debt, weight_payments, history):
    """Handle one user turn of the debt negotiation chat.

    First checks acceptance via the LLM sentinel words; otherwise classifies
    the complaint (Plazo/Monto), bumps the matching counter, re-runs the
    negmas negotiation with counter-weighted debtor preferences, and offers
    the resulting agreement.

    Returns:
        (updated chat history, "" to clear the input textbox).
    """
    global mod_plazo, mod_monto, mod_interes, current_offer
    # Acceptance thresholds per the evaluate_acceptance prompt:
    # amount >= min_debt and payments <= max_payments.
    ans = evaluate_acceptance((min_debt, max_payments), user_input)
    if 'pingüino' in ans:  # user accepted our offer
        response = 'Excelente! Gracias por aceptar la oferta.\n'
        history = history + [(user_input, response)]
        return history, ""
    if 'tesla' in ans:  # we accept the user's counter-offer
        response = 'Excelente! Aceptamos esos terminos. Gracias por negociár con nosotros \n'
        history = history + [(user_input, response)]
        return history, ""
    # No acceptance: classify the complaint with a fresh LLM classifier.
    # (Renamed from MessageEval to avoid shadowing the module-level instance.)
    classifier = Ticio(system_prompt)
    classifier.add_message(user_input)
    classifier.inference()
    ans = classifier.last_inference()
    if 'Plazo' in ans:
        mod_plazo += 1
        print('plazo +1')
    if 'Monto' in ans:
        mod_monto += 1
        print('monto +1')
    # NOTE(review): the classifier prompt never emits 'Interés' — this branch
    # looks dead; confirm before relying on mod_interes.
    if 'Interés' in ans:
        mod_interes += 1
        print('interes +1')
    # Widen the debtor's range downward so the negotiation can concede.
    difference = max_debt - min_debt
    new_min_debt = min_debt - difference
    debt_weight = mod_monto * 100
    payments_weight = mod_plazo * 100
    current_offer = run_debt_negotiation(new_min_debt, max_debt, min_payments, max_payments,
                                         weight_debt, weight_payments,
                                         total_debt_debtor_weight=debt_weight,
                                         num_payments_debtor_weight=payments_weight)
    # Bug fix: session.run() may end without agreement (agreement is None);
    # the original crashed indexing it. Fall back to the incoming bounds.
    if current_offer.agreement is not None:
        max_debt, max_payments = current_offer.agreement
    response = (
        f"Entiendo, considerando lo anterior podemos ofrecerte: ${max_debt} en {max_payments} pagos.\n"
    )
    history = history + [(user_input, response)]
    return history, ""
# Gradio interface: one tab for the negotiation parameters, one for the chat.
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("Parámetros de negociación"):
            gr.Markdown("### Ajusta los parámetros")
            # Sliders feed the negotiation bounds and the collector's weights.
            min_debt = gr.Slider(label="Deuda mínima", minimum=1000, maximum=10000, value=6000)
            max_debt = gr.Slider(label="Deuda máxima", minimum=1000, maximum=10000, value=8000)
            min_payments = gr.Slider(label="Pagos mínimos", minimum=1, maximum=36, value=1)
            max_payments = gr.Slider(label="Pagos máximos", minimum=1, maximum=36, value=6)
            weight_debt = gr.Slider(label="Peso: Deuda", minimum=0, maximum=2, value=1)
            weight_payments = gr.Slider(label="Peso: Pagos", minimum=0, maximum=2, value=1)
        with gr.TabItem("Chat de negociación"):
            gr.Markdown("### Conversación")
            chatbot_output = gr.Chatbot()
            user_input = gr.Textbox(label="Tu mensaje")
            send_btn = gr.Button("Enviar")
            restart_btn = gr.Button("🔁 Reiniciar conversación")
    # One turn: message + current slider values in; updated history and an
    # empty string (clears the textbox) out.
    send_btn.click(
        fn=chatbot_response,
        inputs=[
            user_input,
            min_debt, max_debt, min_payments, max_payments,
            weight_debt, weight_payments,
            chatbot_output,
        ],
        outputs=[chatbot_output, user_input]
    )
    # Restart: reset counters/offer and replace the history with the greeting.
    restart_btn.click(
        fn=initial_message,
        inputs=[
            min_debt, max_debt, min_payments, max_payments,
            weight_debt, weight_payments
        ],
        outputs=chatbot_output
    )
    # Same reset on page load, so the chat opens with the greeting.
    demo.load(
        fn=initial_message,
        inputs=[
            min_debt, max_debt, min_payments, max_payments,
            weight_debt, weight_payments
        ],
        outputs=chatbot_output
    )
demo.launch(debug=True)