Update app.py

app.py CHANGED
@@ -1,11 +1,10 @@
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import torch

-# Agent 1: Intent Classifier
+# ---------------- Agent 1: Intent Classifier ----------------
 intent_classifier = pipeline("text-classification", model="MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli")

-
 def detect_intent(text):
     labels = {
         "weather": "The user wants to know the weather.",
@@ -21,41 +20,40 @@ def detect_intent(text):
             best_intent = label
     return best_intent

-# Agent 2: Domain Logic
+# ---------------- Agent 2: Domain Logic ----------------
 def handle_logic(intent):
     if intent == "weather":
-        return "It
+        return "It's sunny and 26°C today."
     elif intent == "faq":
-        return "
+        return "To reset your password, use the 'Forgot Password' option."
     else:
-        return "
+        return "That's great! Anything else you'd like to talk about?"

-# Agent 3:
+# ---------------- Agent 3: Natural Language Generation ----------------
 tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
 model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

-def generate_reply(
-    input_ids = tokenizer.encode(
-
-
-    return
+def generate_reply(prompt):
+    input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors='pt')
+    output_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
+    response = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+    return response

-#
+# ---------------- Chatbot Pipeline ----------------
 def chatbot(user_input):
     intent = detect_intent(user_input)
     logic = handle_logic(intent)
-
-    return
-
-# Gradio
-
-
-
-
-
-
-
-
+    response = generate_reply(logic)
+    return response
+
+# ---------------- Gradio UI ----------------
+gr.Interface(
+    fn=chatbot,
+    inputs=gr.Textbox(label="User Input"),
+    outputs=gr.Textbox(label="Chatbot Response"),
+    title="3-Agent Chatbot",
+    description="Intent Detection → Domain Logic → Natural Language Generation"
+).launch()
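The hunk collapses the middle of detect_intent (the new file's lines 11-19), which is where the NLI checkpoint scores each candidate intent. Below is a minimal sketch of how such a zero-shot scoring loop is commonly written against this pipeline; the committed body is not shown in the diff, so the loop itself, the non-"weather" hypothesis wordings, and the detect_intent_sketch / "chitchat" names are illustrative assumptions, not the Space's actual code.

from transformers import pipeline

# Same NLI checkpoint that app.py loads for Agent 1.
intent_classifier = pipeline(
    "text-classification",
    model="MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli",
)

def detect_intent_sketch(text):
    # "weather" mirrors app.py; the other hypotheses are assumed wordings.
    labels = {
        "weather": "The user wants to know the weather.",
        "faq": "The user is asking a support or how-to question.",
        "chitchat": "The user is making small talk.",
    }
    best_intent, best_score = None, float("-inf")
    for label, hypothesis in labels.items():
        # Score the user text (premise) against each intent hypothesis;
        # top_k=None returns entailment/neutral/contradiction scores.
        scores = intent_classifier({"text": text, "text_pair": hypothesis}, top_k=None)
        entailment = next(s["score"] for s in scores if s["label"] == "entailment")
        if entailment > best_score:
            best_intent, best_score = label, entailment
    return best_intent

print(detect_intent_sketch("Will it rain tomorrow?"))  # expected: "weather"

The same checkpoint can also be driven through the zero-shot-classification pipeline, which runs this premise/hypothesis comparison internally and returns per-label scores directly.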