Spaces:
Build error
Build error
File size: 1,198 Bytes
8ba8ba9 2372d69 cde97aa 617855e 8ba8ba9 2372d69 8ba8ba9 2372d69 8ba8ba9 2372d69 cde97aa 2372d69 617855e c54660f b8969f6 8ba8ba9 2372d69 8ba8ba9 2372d69 8ba8ba9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM
import deepspeed
# Hugging Face model id of the InternVideo2.5 chat model (OpenGVLab).
model_name = "OpenGVLab/InternVideo2_5_Chat_8B"
# Load tokenizer; trust_remote_code is required because this repo ships
# custom model/tokenizer code.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Load the model.
# BUG FIX: the original passed `deepspeed={"stage": 3}` to
# `from_pretrained`, which is NOT a valid keyword argument and raises
# `TypeError` at load time (the likely "Build error" shown on the Space).
# DeepSpeed ZeRO-3 inference must be configured separately — e.g. with
# `transformers.integrations.HfDeepSpeedConfig` created *before* loading,
# or `deepspeed.init_inference(model, ...)` after loading — and it
# conflicts with `device_map="auto"`, so only one placement strategy
# should be active at a time.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.float16,  # half precision to halve GPU memory
    device_map="auto",          # let accelerate place/shard across devices
)
# Inference function wired into the Gradio UI below.
def chat_with_model(prompt: str) -> str:
    """Generate a completion for ``prompt`` and return the decoded text.

    Fixes over the original:
    - inputs are moved to ``model.device`` instead of a hard-coded
      ``"cuda"``, so the app also works on CPU-only or sharded setups;
    - ``max_new_tokens`` replaces ``max_length`` — ``max_length`` counts
      the prompt tokens too, so long prompts would generate little or
      nothing;
    - generation runs under ``torch.no_grad()`` to skip building an
      unneeded autograd graph.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Create the Gradio UI: one textbox in, plain text out.
# FIX: the original passed `theme="compact"`, a legacy Gradio 2/3 theme
# name that current Gradio versions reject or ignore with a warning.
# Dropped in favor of the default theme; to customize, pass a
# `gr.themes.*` theme object instead.
demo = gr.Interface(
    fn=chat_with_model,
    inputs=gr.Textbox(placeholder="Type your prompt here..."),
    outputs="text",
    title="InternVideo2.5 Chatbot",
    description="A chatbot powered by InternVideo2_5_Chat_8B.",
)
# Run the Gradio app only when executed as a script (not on import),
# so the module can be imported without starting the web server.
if __name__ == "__main__":
    demo.launch()
|