import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import json

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
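# Optional sketch (not in the original app): on a GPU-backed Space, moving the
# model to CUDA speeds up generation; input_ids in generate_question would then
# need .to(model.device) as well.
# if torch.cuda.is_available():
#     model = model.to("cuda")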

# Load the sample dataset
with open("memory_questions.json", "r") as f:
    memory_data = json.load(f)
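# Assumed shape of memory_questions.json, inferred from the keys used below:
# [{"description": "<a short personal memory>", "question": "<a follow-up question>"}, ...]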

# Build a few-shot prompt from the first five examples
few_shot_examples = "\n".join(
    [f"Memory: {item['description']}\nQuestion: {item['question']}" for item in memory_data[:5]]
)

def generate_question(memory):
    prompt = f"""{few_shot_examples}
Memory: {memory}
Question:"""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output = model.generate(input_ids, max_new_tokens=50, do_sample=False)
    # Decode only the newly generated tokens rather than the echoed prompt;
    # splitting the full output on "Question:" could pick up a later
    # hallucinated question instead of the one for the user's memory
    generated = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
    # Greedy decoding often keeps going with further "Memory:" lines,
    # so keep only the first generated line as the question
    return generated.strip().split("\n")[0].strip()
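# Quick local sanity check (hypothetical input; the output varies with the
# model and the few-shot examples in memory_questions.json):
# print(generate_question("We adopted a puppy last spring."))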

# Gradio UI
iface = gr.Interface(
    fn=generate_question,
    inputs=gr.Textbox(label="Your Memory"),
    outputs=gr.Textbox(label="Generated Question"),
    title="Memory-Aware Question Generator (TinyLlama)",
)

iface.launch()