import os
import gradio as gr
from huggingface_hub import InferenceApi
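# dicta-il/dictalm-7b-instruct: DICTA's instruction-tuned 7B Hebrew language model on the Hugging Face Hub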
model_id = 'dicta-il/dictalm-7b-instruct'
# Create the Hugging Face Inference API client (token read from the HUGGINGFACE_API_KEY environment variable)
api_key = os.getenv('HUGGINGFACE_API_KEY', '')
generator = InferenceApi(repo_id=model_id, token=api_key)
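# Note: InferenceApi is the legacy client; recent huggingface_hub releases deprecate it
# in favor of InferenceClient (see the sketch after demo.launch() below)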
# Text-generation function: send the latest user message to the model and append its reply
def chat_with_model(history):
    prompt = history[-1]["content"]
    try:
        response = generator(prompt)
        # The Inference API may return a list of generations or a single dict
        if isinstance(response, list) and response and "generated_text" in response[0]:
            result = response[0]["generated_text"]
        elif isinstance(response, dict) and "generated_text" in response:
            result = response["generated_text"]
        else:
            result = str(response)
    except Exception as e:
        result = f"Error: {str(e)}"
    # gr.Chatbot(type="messages") only accepts the roles "user" and "assistant", not "bot"
    return history + [{"role": "assistant", "content": result}]
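# A minimal sketch of calling the handler directly (hypothetical smoke test, not part of the UI):
#   chat_with_model([{"role": "user", "content": "Hello"}])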
# Build an advanced Gradio interface: a chatbot with a clean, custom style
with gr.Blocks(theme="default") as demo:
    gr.HTML("""
    <div style="background-color: #f5f5f5; padding: 20px; text-align: center;">
        <h1 style="color: #003366; font-family: Arial, sans-serif;">Chat with the DictaLM model</h1>
        <p style="font-family: Arial, sans-serif; color: #333;">Welcome to our interactive chat, where you can try out a conversation with an advanced AI model.</p>
    </div>
    """)
    chatbot = gr.Chatbot(label="Chat with the DictaLM model", type="messages")
    with gr.Row():
        user_input = gr.Textbox(placeholder="Type your message here...", label="", lines=1)
        send_button = gr.Button("Send")
    # Append the user's message to the history and clear the input box
    def user_chat(history, message):
        return history + [{"role": "user", "content": message}], ""
    # Send the message either by pressing Enter or by clicking the "Send" button;
    # queue=False lets user_chat run immediately, and .then() chains the slower model call after it
    user_input.submit(fn=user_chat, inputs=[chatbot, user_input], outputs=[chatbot, user_input], queue=False).then(
        fn=chat_with_model, inputs=chatbot, outputs=chatbot
    )
    send_button.click(fn=user_chat, inputs=[chatbot, user_input], outputs=[chatbot, user_input], queue=False).then(
        fn=chat_with_model, inputs=chatbot, outputs=chatbot
    )
demo.launch()
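# A minimal sketch of the same generation call with the newer InferenceClient API,
# assuming a recent huggingface_hub release (a hypothetical replacement for the
# InferenceApi call above, not part of this app):
#
#   from huggingface_hub import InferenceClient
#   client = InferenceClient(model=model_id, token=api_key)
#   text = client.text_generation(prompt, max_new_tokens=256)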