# NOTE: Hugging Face Spaces viewer residue removed (Space status "Sleeping",
# file size 1,859 bytes, per-line commit hashes and line-number gutter).
# app.py
import gradio as gr
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)
import torch

# Load the model and tokenizer.
# BUG FIX: the original instantiated AutoModelForSequenceClassification, but
# generate_answer() below calls model.generate(); a classification head is not
# generative, so a seq2seq (encoder-decoder) model class is required here.
# NOTE(review): "aubmindlab/bert-base-arabertv2" is an encoder-only BERT
# checkpoint, not a seq2seq one — a generative Arabic model (e.g. an AraT5
# variant) should be substituted; confirm against the deployment.
model_name = "aubmindlab/bert-base-arabertv2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Use the GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
def generate_answer(question, context):
    """Generate an answer to *question* grounded in *context*.

    Builds an Arabic "question/context" prompt, runs it through the
    module-level seq2seq model, and decodes the first generated sequence.
    """
    prompt = f"سؤال: {question} سياق: {context}"
    # Tokenize on the same device the model lives on.
    encoded = tokenizer(prompt, return_tensors="pt", padding=True).to(device)
    generated = model.generate(**encoded, max_length=256)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def ask_question(question, context):
    """Gradio callback: validate the question and return a formatted answer.

    Returns an Arabic error message when the question is missing or blank;
    otherwise delegates to generate_answer() and wraps its output.
    """
    # Robustness fix: Gradio passes None for a cleared textbox, and the
    # original `question.strip()` would raise AttributeError on None.
    if not question or not question.strip():
        return "❌ الرجاء كتابة سؤال."
    # Robustness fix: avoid interpolating the literal string "None" into the
    # prompt when no context was provided.
    answer = generate_answer(question, context or "")
    return f"✅ الإجابة: {answer}"
# Assemble the question-answering UI and launch the app.
with gr.Blocks(title="سؤال وجواب من الكتب") as demo:
    gr.Markdown("""
# 📚 اسأل كتبك!
اطرح أي سؤال وسنبحث لك عن الجواب من محتوى الكتب بدقة وفهم!
""")
    # Input widgets: the user's question and a pasted passage from a book.
    with gr.Row():
        question_input = gr.Textbox(label="✍️ اكتب سؤالك هنا:", placeholder="مثال: ما معنى الذكاء الاصطناعي؟")
    with gr.Row():
        context_input = gr.Textbox(label="📖 اكتب أو الصق نص من كتابك هنا:", placeholder="انسخ فقرة أو أكثر من الكتاب...")
    with gr.Row():
        submit_btn = gr.Button("🔍 احصل على الإجابة")
    answer_output = gr.Textbox(label="💬 الإجابة:")
    # Wire the button to the QA callback.
    submit_btn.click(fn=ask_question, inputs=[question_input, context_input], outputs=answer_output)

demo.launch()