# Source: Hugging Face repo "turtlegraphics" — commit 1609e16 (verified),
# message: "Now using predictive text." (610 bytes). The lines above were
# file-viewer chrome captured during extraction, preserved here as a comment.
#
# gradio demo
#
import gradio as gr
from transformers import pipeline
# Build a Hugging Face text-generation pipeline.
# NOTE(review): no checkpoint is pinned, so this downloads the pipeline's
# current default model at startup — pin an explicit model name for
# reproducibility. TODO confirm which default the installed version uses.
model = pipeline("text-generation")
def predict(prompt):
    """Return a text continuation of *prompt* from the generation pipeline.

    The pipeline yields a list of candidate dicts; the first candidate's
    ``generated_text`` field is returned unchanged.
    """
    return model(prompt)[0]["generated_text"]
# Wire the prediction function into a minimal text-in / text-out web UI.
# The module-level name ``demo`` is kept: hosting environments look it up.
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
)
demo.launch()