import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer once at startup
MODEL_NAME = "google/gemma-2b-it"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
)

# Streamlit UI
st.title("Gemma-2B Code Assistant")
user_input = st.text_area("Enter your coding query:")

if st.button("Generate Code"):
    if user_input:
        # Send inputs to whatever device accelerate placed the model on,
        # rather than hard-coding "cuda" (which fails on CPU-only hosts)
        inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, max_new_tokens=100)
        response = tokenizer.decode(output[0], skip_special_tokens=True)
        st.write(response)
    else:
        st.warning("Please enter a query!")
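
One caveat: Streamlit reruns the entire script on every interaction, so the script above would reload the multi-gigabyte model each time the button is clicked. A common fix is to wrap the load in st.cache_resource so the weights are loaded only once per process. Below is a minimal sketch of that pattern; the load_model helper name is illustrative, not part of any API, and note that Gemma is a gated model, so the Space may need an accepted license and a Hugging Face token to download it.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_NAME = "google/gemma-2b-it"

@st.cache_resource  # cache across Streamlit reruns so the weights load once
def load_model():
    # Illustrative helper: returns the same tokenizer/model pair used above
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
    )
    return tokenizer, model

tokenizer, model = load_model()

Locally, the app can be started with `streamlit run app.py` (assuming the script is saved as app.py); on a Hugging Face Space the same file is picked up automatically when the Space SDK is set to Streamlit.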