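# Streamlit front-end for the Gemma-2B code assistant.
# The commented-out block below is the earlier GPU (CUDA) version, kept for reference;
# the active CPU-only version follows it.
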
# import os
# import streamlit as st
# import torch
# from transformers import AutoTokenizer, AutoModelForCausalLM
# from huggingface_hub import login

# # Load Hugging Face Token from Secrets
# hf_token = os.getenv("HF_TOKEN")

# if not hf_token:
#     st.error("Hugging Face token is missing! Please add it to Hugging Face Secrets.")
#     st.stop()

# # Authenticate
# login(token=hf_token)

# # Load the model and tokenizer with authentication
# MODEL_NAME = "google/gemma-2b-it"
# tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
# model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=hf_token, torch_dtype=torch.float16, device_map="auto")

# # Streamlit UI
# st.title("Gemma-2B Code Assistant")
# user_input = st.text_area("Enter your coding query:")

# if st.button("Generate Code"):
#     if user_input:
#         inputs = tokenizer(user_input, return_tensors="pt").to("cuda")
#         output = model.generate(**inputs, max_new_tokens=100)
#         response = tokenizer.decode(output[0], skip_special_tokens=True)
#         st.write(response)
#     else:
#         st.warning("Please enter a query!")

import os
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load Hugging Face Token
hf_token = os.getenv("HF_TOKEN")

if not hf_token:
    st.error("❌ Hugging Face token is missing! Please add it to Secrets.")
    st.stop()

# Set device to CPU (because CUDA is unavailable)
device = "cpu"

# Load tokenizer and model in CPU mode (without bitsandbytes)
MODEL_NAME = "google/gemma-2b-it"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=hf_token,
    torch_dtype=torch.float32,  # 👈 standard float32; float16 gives little benefit on CPU
    device_map="cpu"            # note: device_map requires the `accelerate` package
)

# Streamlit UI
st.title("Gemma-2B Code Assistant")
user_input = st.text_area("Enter your coding query:")

if st.button("Generate Code"):
    if user_input:
        with st.spinner("⏳ Generating response... Please wait!"):
            inputs = tokenizer(user_input, return_tensors="pt").to(device)
            output = model.generate(**inputs, max_new_tokens=50)
            response = tokenizer.decode(output[0], skip_special_tokens=True)
        
        st.subheader("📝 Generated Code:")
        st.code(response, language="python")
    else:
        st.warning("⚠️ Please enter a query!")
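
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py
# HF_TOKEN must be set in the environment (or as a Space secret) before launching.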