import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

model = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
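# If the model requires authentication or anonymous requests get rate limited, a token can
# be passed to the client. `token` is a standard InferenceClient argument; reading it from
# an HF_TOKEN environment variable is just a common convention, not something this app needs.
# A minimal sketch, kept commented out:
#
# import os
# model = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))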


def predict(prompt):
    # Ask the hosted model for a completion. text_generation returns the generated
    # text as a plain string; max_new_tokens caps the length of the reply.
    completion = model.text_generation(prompt, max_new_tokens=256)
    return completion
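
# zephyr-7b-beta is an instruction-tuned chat model, so InferenceClient's conversational
# chat_completion method may suit it better than raw text generation. A sketch of that
# variant, kept commented out; predict_chat is just an illustrative name, and max_tokens
# is a standard chat_completion parameter:
#
# def predict_chat(prompt):
#     response = model.chat_completion(
#         messages=[{"role": "user", "content": prompt}],
#         max_tokens=256,
#     )
#     return response.choices[0].message.content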


"""
For information on how to customize the Interface, peruse the gradio docs: https://www.gradio.app/docs/interface
"""
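
# One way to customize the Interface (a sketch, kept commented out so the minimal demo
# below stays the one that runs): richer text components, a title, and example prompts.
# All keyword arguments used here are standard gr.Interface options; the title text and
# example prompt are made up for illustration.
#
# demo = gr.Interface(
#     fn=predict,
#     inputs=gr.Textbox(lines=4, label="Prompt"),
#     outputs=gr.Textbox(label="Completion"),
#     title="Zephyr 7B Beta text generation",
#     examples=["Explain what the Hugging Face Inference API does in one sentence."],
# )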
demo = gr.Interface(fn=predict, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()