Update app.py
app.py CHANGED
@@ -1,4 +1,5 @@
 import torch
+import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda" if torch.cuda.is_available() else "cpu"  # Automatically detect GPU or CPU
@@ -43,14 +44,19 @@ def generate_response(user_input):
     return chatbot_response
 
 # Continuous conversation loop
-while True:
+'''while True:
     user_input = input("You: ")  # Take user input
     if user_input.lower() in ["exit", "quit", "stop"]:
         print("Chatbot: Goodbye!")
         break
 
     response = generate_response(user_input)
-    print("Chatbot:", response)
+    print("Chatbot:", response)'''
+
+
+# Initialize the ChatInterface
+chatbot = gr.ChatInterface(fn=generate_response, title="Mental Health Chatbot")
+chatbot.launch()
 
 
 '''
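Note on the new Gradio wiring: gr.ChatInterface calls its fn with two arguments, (message, history), while the generate_response in this diff appears to take a single user_input. A minimal sketch of an adapter that keeps the one-argument function working; the generate_response body below is a stand-in, since the model code is not shown in this hunk:

import gradio as gr

def generate_response(user_input):
    # Stand-in for the Space's model-backed function (not shown in this diff).
    return f"You said: {user_input}"

def respond(message, history):
    # gr.ChatInterface passes (message, history); the one-argument
    # generate_response ignores the chat history, so adapt the call here.
    return generate_response(message)

chatbot = gr.ChatInterface(fn=respond, title="Mental Health Chatbot")
chatbot.launch()

If generate_response is instead updated to accept (message, history), it can be passed to gr.ChatInterface directly, as the commit does.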