import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
model_id = "thrishala/mental_health_chatbot"

try:
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map=device,  # Use the determined device
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        # max_memory keys are device identifiers: the GPU index on CUDA, otherwise "cpu"
        max_memory={0: "15GB"} if device == "cuda" else {"cpu": "15GB"},
        offload_folder="offload",
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.model_max_length = 512  # Cap the prompt length fed to the model
except Exception as e:
    print(f"Error loading model: {e}")
    exit()
def generate_text(prompt, max_new_tokens=128):
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        output = model.generate(
            input_ids=input_ids,
            max_new_tokens=max_new_tokens,
            do_sample=False,  # Greedy decoding; set to True for sampling
            eos_token_id=tokenizer.eos_token_id,
        )

    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
def generate_text_streaming(prompt, max_new_tokens=128):
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        for _ in range(max_new_tokens):
            output = model.generate(
                input_ids=input_ids,
                max_new_tokens=1,  # Generate only 1 new token at a time
                do_sample=False,  # Greedy decoding; set to True for sampling
                eos_token_id=tokenizer.eos_token_id,
                return_dict_in_generate=True,  # Return a structured generate output
                output_scores=True,  # Also return the scores
            )

            new_token_id = output.sequences[:, -1:]  # The single token just generated
            generated_token = tokenizer.decode(new_token_id[0], skip_special_tokens=True)
            yield generated_token  # Yield the new token

            input_ids = torch.cat([input_ids, new_token_id], dim=-1)  # Append it to the input

            if new_token_id[0, 0].item() == tokenizer.eos_token_id:  # Stop at end of sequence
                break
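
# A possible alternative to the manual one-token-at-a-time loop above is
# transformers' TextIteratorStreamer, which runs generate() in a background
# thread and yields decoded text as it is produced. This is only a sketch that
# reuses the same global `model` and `tokenizer`; it is not wired into the
# Gradio app below.
from threading import Thread
from transformers import TextIteratorStreamer

def generate_text_streaming_alt(prompt, max_new_tokens=128):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=False,
        streamer=streamer,
    )
    # Run generation in a worker thread so we can iterate over the streamer here
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    for new_text in streamer:
        yield new_text
    thread.join()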
def respond(message, history, system_message, max_tokens):
    prompt = f"{system_message}\n"
    for user_msg, bot_msg in history:
        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"

    response = ""
    try:
        # ChatInterface replaces the displayed message with each yielded value,
        # so accumulate the tokens and yield the running response.
        for token in generate_text_streaming(prompt, max_tokens):
            response += token
            yield response
    except Exception as e:
        print(f"Error during generation: {e}")
        yield "An error occurred."
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly and helpful mental health chatbot.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=128, value=32, step=10, label="Max new tokens"),
    ],
)
if __name__ == "__main__":
    demo.launch()