import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

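# Load the fine-tuned FLAN-T5 model and its tokenizer from the Hugging Face Hub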
model = AutoModelForSeq2SeqLM.from_pretrained("memorease/memorease-flan-t5")
tokenizer = AutoTokenizer.from_pretrained("memorease/memorease-flan-t5")

def generate_question(description):
    prompt = f"Only generate a factual and relevant question about this memory: {description}"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

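# Quick sanity check (hypothetical example input), assuming the model weights
# downloaded successfully; calls the generator directly, without the web UI:
# generate_question("I visited the Eiffel Tower with my sister in 2010")
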
# Correct Interface definition
demo = gr.Interface(
    fn=generate_question,
    inputs=gr.Textbox(label="Memory Description"),
    outputs=gr.Textbox(label="Generated Question"),
    allow_flagging="never",  # Optional: prevent users from flagging outputs
    live=False  # No live inference unless needed
)

demo.launch(server_name="0.0.0.0", server_port=7860)  # so it runs correctly on the Space server
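
# Once the app is running, it can also be queried programmatically with
# gradio_client (a sketch; the localhost URL is a placeholder and "/predict"
# assumes the default endpoint name of a single-function gr.Interface):
# from gradio_client import Client
# client = Client("http://localhost:7860")
# print(client.predict("I adopted a cat named Miso last spring", api_name="/predict"))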