Update app.py
app.py
CHANGED
@@ -178,6 +178,33 @@ if 'history' not in st.session_state:
 # Create the Streamlit interface
 st.title("Pattern Completion")
 
+with st.sidebar:
+    st.header("Model Configuration")
+
+    # Model selection dropdown
+    model_name = st.selectbox(
+        "Select Model",
+        ["gemini-exp-1121", "gemini-exp-1206", "gemini-1.5-pro"]
+    )
+
+    # Temperature slider
+    temperature = st.slider(
+        "Temperature",
+        min_value=0.0,
+        max_value=1.0,
+        value=1.0,
+        step=0.1
+    )
+
+    # Top_p slider
+    top_p = st.slider(
+        "Top P",
+        min_value=0.0,
+        max_value=1.0,
+        value=0.95,
+        step=0.05
+    )
+
 # API key input
 api_key = st.text_input("Enter your Gemini API key:", type="password")
 
@@ -187,15 +214,15 @@ if api_key:
 
     # Create the model
     generation_config = {
-        "temperature":
-        "top_p":
+        "temperature": temperature,
+        "top_p": top_p,
         "top_k": 64,
         "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
     }
 
     model = genai.GenerativeModel(
-        model_name=
+        model_name=model_name,
         generation_config=generation_config,
     )
 
@@ -207,7 +234,7 @@ if api_key:
     # Create prompt with history and new input
     prompt = st.session_state.history.copy()
     prompt.extend([f"input: {user_input}", "output: "])
-
+    print(prompt)
     # Generate response
     try:
         response = model.generate_content(prompt)
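
For orientation, here is a minimal sketch of how the touched section of app.py reads after this commit. It is an approximation, not the full file: the genai.configure(api_key=...) call and the user_input widget are assumed from surrounding code this diff does not show, and the history initialization is taken from the hunk header.

import streamlit as st
import google.generativeai as genai

# From the hunk header: history is initialized in session state
# before the UI is built.
if 'history' not in st.session_state:
    st.session_state.history = []

# Create the Streamlit interface
st.title("Pattern Completion")

with st.sidebar:
    st.header("Model Configuration")

    # Model selection dropdown
    model_name = st.selectbox(
        "Select Model",
        ["gemini-exp-1121", "gemini-exp-1206", "gemini-1.5-pro"]
    )

    # Sampling controls feeding generation_config below
    temperature = st.slider("Temperature", min_value=0.0, max_value=1.0,
                            value=1.0, step=0.1)
    top_p = st.slider("Top P", min_value=0.0, max_value=1.0,
                      value=0.95, step=0.05)

# API key input
api_key = st.text_input("Enter your Gemini API key:", type="password")

if api_key:
    genai.configure(api_key=api_key)  # assumed; not shown in this diff

    # Create the model from the sidebar selections
    generation_config = {
        "temperature": temperature,
        "top_p": top_p,
        "top_k": 64,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    model = genai.GenerativeModel(
        model_name=model_name,
        generation_config=generation_config,
    )

    # The user_input widget is assumed; the diff only shows how it is consumed.
    user_input = st.text_input("Input:")
    if user_input:
        # Create prompt with history and new input
        prompt = st.session_state.history.copy()
        prompt.extend([f"input: {user_input}", "output: "])
        print(prompt)
        # Generate response
        try:
            response = model.generate_content(prompt)
            st.write(response.text)
        except Exception as e:
            st.error(e)

Since the sliders default to temperature 1.0 and top-p 0.95, a user who never opens the sidebar still gets those values in generation_config; only moving a control changes the sampling behavior.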