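# app.py: Streamlit front end for the fine-tuned TinyLlama laptop recommender.
# Assumes the model and tokenizer were saved with save_pretrained() into
# ./laptop-tinyllama; launch locally with: streamlit run app.py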
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Path to the locally saved fine-tuned model inside your Space
MODEL_DIR = "./laptop-tinyllama"

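# Cache the pipeline with st.cache_resource so the model is loaded once per
# server process rather than on every Streamlit rerun.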
@st.cache_resource
def load_pipeline():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load the cached generation pipeline
generator = load_pipeline()

# Streamlit UI
st.title("💻 Laptop Recommendation with TinyLlama")
st.write("Enter a question like: *Suggest a laptop for gaming under 1 lakh BDT.*")

# Prompt input
prompt = st.text_area("Enter your query", value="Suggest a laptop for programming under 70000 BDT.")

if st.button("Generate Response"):
    with st.spinner("Generating..."):
        # do_sample=True is needed for temperature to take effect; without it
        # the pipeline decodes greedily and silently ignores temperature.
        result = generator(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
        # generated_text contains the prompt followed by the model's continuation
        st.success(result[0]["generated_text"])