looker01202 committed on
Commit · d3ade6b
1 Parent(s): 50aecff
stable gradio interface but requires improvement
app.py CHANGED
@@ -78,8 +78,10 @@ def chat(message, history, hotel_id):
         add_generation_prompt=True
     )
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-
+    with torch.no_grad():
+        outputs = model.generate(inputs, max_new_tokens=1024, do_sample=True)
     decoded = tokenizer.decode(outputs[0], skip_special_tokens=False)
+    print(decoded)
     # Extract assistant response
     response = decoded.split("<|im_start|>assistant")[-1]
     response = response.split("<|im_end|>")[0].strip()
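In this hunk the `model.generate` call moves under `torch.no_grad()`, so no autograd state is tracked during inference, and a `print(decoded)` is added for debugging. For context, the surrounding flow reads roughly like the sketch below; the checkpoint name and loading code are assumptions for illustration, not taken from the Space:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed model: any small ChatML-style instruct checkpoint; the Space's
# actual model is not shown in this diff.
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

def generate_reply(messages: list[dict]) -> str:
    # Render the conversation with the model's chat template.
    input_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    # Inference only: no_grad() skips autograd bookkeeping, as in the hunk.
    with torch.no_grad():
        outputs = model.generate(inputs, max_new_tokens=1024, do_sample=True)
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=False)
    # Cut the newly generated assistant turn out between its delimiters.
    response = decoded.split("<|im_start|>assistant")[-1]
    return response.split("<|im_end|>")[0].strip()
```

`skip_special_tokens=False` is what keeps the ChatML markers `<|im_start|>`/`<|im_end|>` in the decoded string so the final two `split` calls can isolate just the assistant turn.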
@@ -104,7 +106,8 @@ def chat(message, history, hotel_id):
         add_generation_prompt=True
     )
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-
+    with torch.no_grad():
+        outputs = model.generate(inputs, max_new_tokens=1024, do_sample=True)
     decoded = tokenizer.decode(outputs[0], skip_special_tokens=False)
     response = decoded.split("<|start_of_role|>assistant<|end_of_role|>")[-1]
     response = response.split("<|end_of_text|>")[0].strip()
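This is the same `torch.no_grad()` change applied to the second code path, whose delimiters (`<|start_of_role|>assistant<|end_of_role|>` / `<|end_of_text|>`) look like IBM Granite chat-template markers rather than ChatML. Since the two paths differ only in which tokens bracket the assistant turn, the duplicated split logic could be factored into a helper; `extract_reply` below is a hypothetical name, not something defined in app.py:

```python
# Delimiter pairs for the two template families seen in app.py.
DELIMS = {
    "chatml": ("<|im_start|>assistant", "<|im_end|>"),
    "granite": ("<|start_of_role|>assistant<|end_of_role|>", "<|end_of_text|>"),
}

def extract_reply(decoded: str, flavor: str) -> str:
    # Take the text after the last assistant marker, up to the end token.
    start, end = DELIMS[flavor]
    return decoded.split(start)[-1].split(end)[0].strip()
```

Here `extract_reply(decoded, "granite")` reproduces the two `split` lines in this hunk.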
@@ -118,24 +121,45 @@ def chat(message, history, hotel_id):
 # Available hotels
 hotel_ids = ["cyprus-guesthouse-family", "coastal-villa-family", "village-inn-family"]
 
+# Gradio UI
 # Gradio UI
 with gr.Blocks() as demo:
-
-    gr.
-
-
-
-
-
-
-
-
-
+    # ⬇️ NEW panel wrapper
+    with gr.Column(variant="panel"):
+
+        gr.Markdown("### 🏨 Multi‑Hotel Chatbot Demo")
+        gr.Markdown(f"**Running:** {model_name}")
+
+        hotel_selector = gr.Dropdown(
+            hotel_ids,
+            label="Hotel",
+            value=hotel_ids[0]
+        )
+
+        # Chat window in its own row so it stretches
+        with gr.Row():
+            chatbot = gr.Chatbot(type="messages")
+
+        msg = gr.Textbox(
+            show_label=False,
+            placeholder="Ask about the hotel..."
+        )
+
+        # Clear‑history button
+        gr.Button("Clear").click(lambda: ([], ""), None, [chatbot, msg])
+
+        # Wire the textbox to the chat function
+        msg.submit(
+            fn=chat,
+            inputs=[msg, chatbot, hotel_selector],
+            outputs=[chatbot, msg]
+        )
+
+    # Anything outside the column shows below the panel
     gr.Markdown("⚠️ Pause the Space when done to avoid charges.")
 
 # Enable streaming queue for generator-based chat
-demo.queue()
+demo.queue(default_concurrency_limit=2, max_size=32)
 
 if __name__ == "__main__":
     demo.launch()
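The UI hunk wires `msg.submit` to `chat` with `outputs=[chatbot, msg]` and turns on the queue, which is what allows a generator-based `chat` to stream: each `yield` must supply one value per output component. A minimal sketch of a compatible generator, assuming the `messages` history format of `gr.Chatbot(type="messages")` and using a hypothetical `fake_reply` in place of the model call from the earlier hunks:

```python
import gradio as gr

def fake_reply(message: str, hotel_id: str) -> str:
    # Stand-in for the tokenizer/model pipeline in the hunks above.
    return f"[{hotel_id}] You asked: {message}"

def chat(message, history, hotel_id):
    # history is a list of {"role": ..., "content": ...} dicts, the format
    # expected by gr.Chatbot(type="messages").
    history = history + [{"role": "user", "content": message}]
    yield history, ""  # show the user turn and clear the textbox immediately
    reply = fake_reply(message, hotel_id)
    history = history + [{"role": "assistant", "content": reply}]
    yield history, ""
```

In `demo.queue(default_concurrency_limit=2, max_size=32)`, `default_concurrency_limit` caps how many queued events run at once and `max_size` bounds how many requests may wait before new ones are rejected; both are standard `queue()` parameters in Gradio 4+.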