Spaces:
Sleeping
Sleeping
File size: 1,587 Bytes
acc2627 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import os
import gradio as gr
from huggingface_hub import InferenceClient
# Pull the inference API key from the environment; None if the variable
# is unset (InferenceClient will then make unauthenticated calls).
api_key = os.environ.get("API_KEY")

# Inference client routed through the "together" provider, authenticated
# with the key above.
client = InferenceClient(provider="together", api_key=api_key)
def chatbot_response(user_input, chat_history):
    """
    Send the conversation (all prior turns plus the new user message) to the
    inference client and append the model's reply to the history.

    Parameters
    ----------
    user_input : str
        The text the user just submitted.
    chat_history : list[tuple[str, str]]
        Accumulated (user_message, bot_message) pairs from earlier turns.

    Returns
    -------
    tuple[str, list]
        An empty string (clears the input textbox) and the updated history
        (drives the Chatbot display).
    """
    # Rebuild the full message list so the model sees the whole conversation,
    # not just the latest user turn (the original sent only `user_input`,
    # silently dropping all context).
    messages = []
    for user_msg, bot_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": user_input})

    completion = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-R1",
        messages=messages,
        max_tokens=500,
    )

    # `.message` is the message object; `.content` holds the reply text.
    # Appending the raw object would render its repr in the chat window.
    bot_message = completion.choices[0].message.content
    chat_history.append((user_input, bot_message))

    # Empty string clears the input textbox; the mutated history is returned
    # so Gradio refreshes the Chatbot component.
    return "", chat_history
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# DeepSeek-R1")

    # Chat transcript display plus a session-scoped list holding the
    # (user, bot) pairs passed back into chatbot_response each turn.
    chatbot = gr.Chatbot()
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(placeholder="Type your message here...", show_label=False)
        send_btn = gr.Button("Send")

    # Pressing Enter in the textbox and clicking the button are wired to
    # the exact same handler with the same inputs/outputs.
    for register in (txt.submit, send_btn.click):
        register(
            chatbot_response,
            inputs=[txt, state],
            outputs=[txt, chatbot],
        )

# Start the web server.
demo.launch()
|