asked gpt to convert existing to app.py
app.py
CHANGED
@@ -7,22 +7,8 @@ from bs4 import BeautifulSoup
 import gradio as gr
 
 # Configure Gemini API key
-
-
-
-try:
-    GOOGLE_API_KEY = 'AIzaSyA0yLvySmj8xjMd0sedSgklg1fj0wBDyyw'
-    genai.configure(api_key=GOOGLE_API_KEY)
-except userdata.SecretNotFoundError as e:
-    print(f'Secret not found\n\nThis expects you to create a secret named {gemini_api_secret_name} in Colab\n\nVisit https://makersuite.google.com/app/apikey to create an API key\n\nStore that in the secrets section on the left side of the notebook (key icon)\n\nName the secret {gemini_api_secret_name}')
-    raise e
-except userdata.NotebookAccessError as e:
-    print(f'You need to grant this notebook access to the {gemini_api_secret_name} secret in order for the notebook to access Gemini on your behalf.')
-    raise e
-except Exception as e:
-    # unknown error
-    print(f"There was an unknown error. Ensure you have a secret {gemini_api_secret_name} stored in Colab and it's a valid key from https://makersuite.google.com/app/apikey")
-    raise e
+GOOGLE_API_KEY = 'AIzaSyA0yLvySmj8xjMd0sedSgklg1fj0wBDyyw'  # Replace with your API key
+genai.configure(api_key=GOOGLE_API_KEY)
 
 # Fetch lecture notes and model architectures
 def fetch_lecture_notes():
@@ -125,7 +111,7 @@ def generate_concise_response(prompt, context):
     return "An error occurred while generating the concise response."
 
 # Main function to execute the pipeline
-def chatbot(message , history):
+def chatbot(message, history):
     lecture_notes = fetch_lecture_notes()
     model_architectures = fetch_model_architectures()
 
@@ -139,7 +125,6 @@ def chatbot(message , history):
     # Initialize FAISS index
     faiss_index = initialize_faiss_index(np.array(embeddings))
 
-
     response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
     print("Query:", message)
     print("Response:", response)
@@ -148,7 +133,7 @@ def chatbot(message , history):
         print("Sources:", sources)
         relevant_source = ""
         for source in sources:
-
+            relevant_source += source + "\n"
         total_text += "\n\nSources:\n" + relevant_source
 
     else:
@@ -161,23 +146,24 @@ def chatbot(message , history):
         concise_response = generate_concise_response(prompt, user_queries_summary)
         print("Concise Response:")
         print(concise_response)
+
     return total_text
 
-iface = gr.
-    chatbot,
+iface = gr.Interface(
+    fn=chatbot,
+    inputs="text",
+    outputs="text",
     title="LLM Research Assistant",
     description="Ask questions about LLM architectures, datasets, and training techniques.",
     examples=[
-        "What are some milestone model architectures in LLMs?",
-        "Explain the transformer architecture.",
-        "Tell me about datasets used to train LLMs.",
-        "How are LLM training datasets cleaned and preprocessed?",
-        "Summarize the user queries so far"
+        ["What are some milestone model architectures in LLMs?"],
+        ["Explain the transformer architecture."],
+        ["Tell me about datasets used to train LLMs."],
+        ["How are LLM training datasets cleaned and preprocessed?"],
+        ["Summarize the user queries so far"]
     ],
-
-    undo_btn="Undo",
-    clear_btn="Clear",
+    allow_flagging="never"
 )
 
 if __name__ == "__main__":
-    iface.launch(
+    iface.launch(server_name="0.0.0.0", server_port=7860)
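
A note on the key setup this commit lands on: hardcoding GOOGLE_API_KEY in app.py checks the key into the repo. On a Hugging Face Space the usual pattern is to store it as a repository secret, which the Space injects as an environment variable. A minimal sketch of that approach, assuming the secret is named GOOGLE_API_KEY:

import os

import google.generativeai as genai

# On a Hugging Face Space, a repository secret named GOOGLE_API_KEY
# (assumed name) is exposed to the app as an environment variable.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise RuntimeError(
        "GOOGLE_API_KEY is not set; create a key at "
        "https://makersuite.google.com/app/apikey and add it as a Space secret."
    )
genai.configure(api_key=GOOGLE_API_KEY)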
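
The for-loop fix supplies the body the old code left empty. An equivalent way to build the same sources block, shown only as an alternative sketch, is a plain join (this version just omits the trailing newline the loop leaves behind):

# Same output as the loop added in this commit, minus the trailing
# newline: one source per line under a "Sources:" heading.
relevant_source = "\n".join(sources)
total_text += "\n\nSources:\n" + relevant_source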
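
Last, a wiring note: chatbot(message, history) has the two-argument signature gr.ChatInterface expects, and the deleted undo_btn/clear_btn kwargs suggest the original was aiming at that class. gr.Interface with inputs="text" passes only the message, so each query should fail with a missing-argument TypeError. A sketch of the chat-style wiring, assuming Gradio 4.x (where ChatInterface examples are plain strings):

import gradio as gr

# ChatInterface calls fn(message, history) on every turn, so the
# chatbot signature from this commit works unchanged.
iface = gr.ChatInterface(
    fn=chatbot,
    title="LLM Research Assistant",
    description="Ask questions about LLM architectures, datasets, and training techniques.",
    examples=[
        "What are some milestone model architectures in LLMs?",
        "Explain the transformer architecture.",
        "Tell me about datasets used to train LLMs.",
        "How are LLM training datasets cleaned and preprocessed?",
        "Summarize the user queries so far",
    ],
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)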