import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "jimy26/Chatbot"
hf_token = os.getenv("HF_TOKEN")

# Load the tokenizer and model, authenticating with the HF_TOKEN environment variable.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    token=hf_token,
)


def chat(prompt):
    # Tokenize the prompt and move it to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate up to 100 new tokens and decode the result back to text.
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


gr.Interface(fn=chat, inputs="text", outputs="text").launch()