Update app.py
app.py CHANGED
@@ -337,7 +337,7 @@ def search_glossary(query):
     )
     st.markdown("# Mixtral-8x7B-Instruct-v0.1")
     st.markdown(result)
-    st.code(result, language="python", line_numbers=True)
+    #st.code(result, language="python", line_numbers=True)


     # ๐ ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /ask_llm
@@ -349,7 +349,7 @@ def search_glossary(query):
     )
     st.markdown("# Mistral-7B-Instruct-v0.2")
     st.markdown(result2)
-    st.code(result2, language="python", line_numbers=True)
+    #st.code(result2, language="python", line_numbers=True)


     # ๐ ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /ask_llm
@@ -361,7 +361,7 @@ def search_glossary(query):
     )
     st.markdown("# Gemma-7b-it")
     st.markdown(result3)
-    st.code(result3, language="python", line_numbers=True)
+    #st.code(result3, language="python", line_numbers=True)


     # ๐ ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /update_with_rag_md
@@ -374,11 +374,11 @@ def search_glossary(query):
     )
     st.markdown("# Mistral-7B-Instruct-v0.2 update_with_rag_md 0")
     st.markdown(response2[0])
-    st.code(response2[0], language="python", line_numbers=True, wrap_lines=True)
+    #st.code(response2[0], language="python", line_numbers=True, wrap_lines=True)

     st.markdown("# Mistral-7B-Instruct-v0.2 update_with_rag_md 1")
     st.markdown(response2[1])
-    st.code(response2[1], language="python", line_numbers=True, wrap_lines=True)
+    #st.code(response2[1], language="python", line_numbers=True, wrap_lines=True)


     # Persist AI Results to Markdown Files
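The commit keeps a single st.markdown render per model response and comments out the duplicate st.code view. If the raw, line-numbered rendering is ever wanted again, one option (not part of this commit, just a sketch) is to gate it behind a toggle instead of deleting the calls. The placeholder result string and the checkbox below are assumptions; in app.py the response strings come from the ArXiv RAG / Ask-LLM API calls (result, result2, result3, response2[...]).

import streamlit as st

# Hypothetical placeholder for an LLM response; in app.py this value
# is returned by the ArXiv RAG / Ask-LLM calls.
result = "## Mixtral answer\n\n- point one\n- point two"

st.markdown("# Mixtral-8x7B-Instruct-v0.1")
st.markdown(result)  # formatted Markdown render, as in the commit

# Optional raw view, equivalent to the st.code calls this commit disables.
# The checkbox is an assumption, not part of the original app.
if st.checkbox("Show raw response"):
    st.code(result, language="python", line_numbers=True, wrap_lines=True)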