import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Load the base model in 4-bit precision; the quantization_config form replaces
# the deprecated bare load_in_4bit kwarg and requires the bitsandbytes package
base_model_path = "google/gemma-3-1b-it"
model = AutoModelForCausalLM.from_pretrained(
    base_model_path,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)

# Attach the fine-tuned PEFT adapter weights on top of the quantized base model
model = PeftModel.from_pretrained(model, "fine_tuned_gemma_3_1b/")

# Fine-tuning leaves the tokenizer unchanged, so load it from the base model
tokenizer = AutoTokenizer.from_pretrained(base_model_path)

# Wrap the adapted model and tokenizer in a text-generation pipeline
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
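
# Note: Streamlit re-runs this whole script on every widget interaction, so as
# written the model is reloaded on each click. A minimal sketch of the usual
# fix (st.cache_resource is Streamlit's built-in cache for heavyweight
# objects; load_generator is a name chosen here for illustration):
#
#     @st.cache_resource
#     def load_generator():
#         base = AutoModelForCausalLM.from_pretrained(
#             base_model_path,
#             quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#             device_map="auto",
#         )
#         tuned = PeftModel.from_pretrained(base, "fine_tuned_gemma_3_1b/")
#         tok = AutoTokenizer.from_pretrained(base_model_path)
#         return pipeline("text-generation", model=tuned, tokenizer=tok)
#
#     text_generator = load_generator()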

# --- Streamlit UI ---
st.title("Fine-tuned Gemma 3 1B LLM")

user_input = st.text_area("Enter your prompt:")

if st.button("Generate Text"):
    if user_input:
        # Generate up to 150 new tokens for the prompt (max_new_tokens counts
        # only generated tokens, unlike the prompt-inclusive max_length)
        output = text_generator(user_input, max_new_tokens=150, num_return_sequences=1)
        generated_text = output[0]['generated_text']

        st.write("Generated Text:")
        st.write(generated_text)
    else:
        st.warning("Please enter a prompt.")
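
# To launch the app (assuming this script is saved as app.py):
#     streamlit run app.py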