|
import spaces |
|
import gradio as gr |
|
from transformers import pipeline |
|
import torch |
|
# --- Startup diagnostics and model load (runs once at import time) ---

print(f"Is CUDA available: {torch.cuda.is_available()}")

# Only query the device name when CUDA is actually present —
# torch.cuda.get_device_name() raises a RuntimeError on CPU-only machines,
# which is precisely the case the check above is probing for.
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")

# Module-level text-generation pipeline (GPT-2), loaded once so every
# request reuses the same weights instead of reloading the model.
generator = pipeline('text-generation', model='gpt2')
|
|
|
|
|
|
|
|
|
# NOTE: the Hugging Face ZeroGPU decorator is spelled `spaces.GPU` (uppercase);
# `@spaces.gpu` raises AttributeError when the module is imported on Spaces.
@spaces.GPU
def generate_text(prompt):
    """Return GPT-2's continuation of *prompt*.

    Runs the module-level `generator` pipeline for a single sequence of at
    most 100 tokens (prompt included, per `max_length` semantics), with
    truncation enabled so over-long prompts do not error out.
    """
    response = generator(prompt, max_length=100, num_return_sequences=1, truncation=True)
    # The pipeline returns a list of dicts, one per requested sequence.
    return response[0]['generated_text']
|
|
|
|
|
def chatbot(message, history):
    """Gradio ChatInterface callback: answer *message* with GPT-2 text.

    `history` is required by the ChatInterface signature but intentionally
    unused — each reply is generated from the latest message alone, so the
    bot is stateless across turns.
    """
    # Coerce to str defensively (the original bare `str(message)` discarded
    # its result, never applying the intended conversion).
    return generate_text(str(message))
|
|
|
|
|
|
|
gr.ChatInterface(chatbot).launch() |
|
|