Update app.py
app.py CHANGED
@@ -1332,8 +1332,8 @@ def StreamMedChatResponse(topic):
 
 # 17. Main
 def arxivmain():
+
     prompt = '''
-
 What is MoE?
 What are Multi Agent Systems?
 What is Self Rewarding AI?
@@ -1342,26 +1342,39 @@ What is AutoGen?
 What is ChatDev?
 What is Omniverse?
 What is Lumiere?
-What is SORA?
-
-
+What is SORA?
 '''
+
     with st.expander("Prompts 📚", expanded=True):
+        #example_input = st.text_input("Enter your prompt text:", value=prompt, help="Enter text to get a response.")
+        #example_input = st.text_area("Enter Prompt :", '', height=100
+
+        # Search History Text Input
+        session_state = {}
+        if "search_queries" not in session_state:
+            session_state["search_queries"] = []
+        example_input = st.text_input("Search", value=session_state["search_queries"][-1] if session_state["search_queries"] else "")
+        if search_query:
+            session_state["search_queries"].append(search_query)
+        st.write("Search history:")
+        for search_query in session_state["search_queries"]:
+            st.write(search_query)
 
-
-        example_input = st.text_input("Enter your prompt text:", value=prompt, help="Enter text to get a response.")
         if st.button("Run Prompt", help="Click to run."):
             try:
                 response=StreamLLMChatResponse(example_input)
                 create_file(filename, example_input, response, should_save)
             except:
                 st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
+
     openai.api_key = os.getenv('OPENAI_API_KEY')
     if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
     choice = st.sidebar.selectbox("Output File Type:", menu)
     model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
     user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
+
+
     collength, colupload = st.columns([2,3]) # adjust the ratio as needed
     with collength:
         max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
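Review note on the added search-history block: session_state = {} is a plain local dict, rebuilt on every Streamlit rerun, so the history can never persist, and "if search_query:" reads a name that is not assigned until the later for loop, raising a NameError on first run. A minimal corrected sketch using Streamlit's built-in st.session_state; only the widget labels and key names are taken from the diff, and the skip-repeats check is an assumption:

import streamlit as st

# Persist history across reruns with st.session_state instead of a
# local dict that is rebuilt on every script execution.
if "search_queries" not in st.session_state:
    st.session_state["search_queries"] = []

example_input = st.text_input(
    "Search",
    value=st.session_state["search_queries"][-1] if st.session_state["search_queries"] else "")

# Record the submitted query (the committed code tested an undefined
# search_query here); skipping repeats is an assumption, not in the diff.
if example_input and example_input not in st.session_state["search_queries"]:
    st.session_state["search_queries"].append(example_input)

st.write("Search history:")
for query in st.session_state["search_queries"]:
    st.write(query)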
@@ -1391,6 +1404,7 @@ What is SORA?
             filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
             create_file(filename, user_prompt, response, should_save)
             st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+
     if st.button('💬 Chat'):
         st.write('Reasoning with your inputs...')
         user_prompt_sections = divide_prompt(user_prompt, max_length)
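Context note: divide_prompt(user_prompt, max_length) drives the per-section loop above, where generate_filename and create_file run once per section. Its body is outside this diff, so the following is only a plausible sketch of that chunking, with word-level packing assumed:

def divide_prompt(prompt, max_length):
    # Assumed behavior: pack whitespace-separated words into sections of
    # at most max_length characters; the real body is not part of this diff.
    sections, current = [], ""
    for word in prompt.split():
        if len(current) + len(word) + 1 <= max_length:
            current = (current + " " + word).strip()
        else:
            if current:
                sections.append(current)
            current = word
    if current:
        sections.append(current)
    return sections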
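A last note on the bare except: branch kept by this commit: it reports the cold start (KEDA scaling the A10 replica up from zero) but leaves retrying to the user. A wrapper along these lines could retry after the advertised wait; the helper name, the timings, and the assumption that StreamLLMChatResponse raises while the endpoint is waking are all illustrative:

import time

def call_when_awake(fn, prompt, attempts=3, wait_seconds=60):
    # Assumption: fn raises while the KEDA-scaled endpoint is still
    # starting; wait out the cold start and retry a few times.
    for attempt in range(attempts):
        try:
            return fn(prompt)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(wait_seconds)

# e.g. response = call_when_awake(StreamLLMChatResponse, example_input)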