Spaces: Running on L4
Commit f7429e0 · Parent(s): eb5f7f4

refactor codes
Files changed:
- app.py +34 -97
- prompts.py +0 -119
- utils/format.py +20 -0
- utils/help.py +11 -0
- utils.py → utils/llama_utils.py +0 -0
- utils/openai_utils.py +79 -0
- utils/rag_utils.py +45 -0
- utils/system_prompts.py +44 -0
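
Read as a whole, the commit splits the old single-file utils.py into a utils/ package and deletes prompts.py, whose two OpenAI calls move into utils/openai_utils.py and whose prompt text moves into utils/system_prompts.py. The resulting layout, as implied by the summary above:

app.py
utils/
    llama_utils.py   (renamed from utils.py)
    format.py
    help.py
    openai_utils.py
    rag_utils.py
    system_prompts.py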
app.py CHANGED

@@ -1,79 +1,21 @@
 import os
 import json
+import re
 import numpy as np
 import streamlit as st
-from sentence_transformers import SentenceTransformer
-from openai import OpenAI
-import random
-import prompts
-from utils import get_bnb_config, load_base_model, load_fine_tuned_model, generate_response
-
-st.set_page_config(page_title="AI University")
-
-# Set the cache directory to persistent storage
-os.environ["HF_HOME"] = "/data/.cache/huggingface"
-
-# client = OpenAI(api_key=st.secrets["general"]["OpenAI_API"])
-client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-
-@st.cache_resource
-def load_youtube_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
-    embedding_space_file_name = f'{base_path}/yt_embedding_space_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
-    with open(embedding_space_file_name, 'r') as json_file:
-        loaded_data = json.load(json_file)
-
-    embedding_space = np.array(loaded_data['embedding_space'])
-    return loaded_data['chunks'], embedding_space
-
-@st.cache_resource
-def load_book_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
-    embedding_space_file_name = f'{base_path}/latex_embedding_space_by_sections_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
-    with open(embedding_space_file_name, 'r') as json_file:
-        loaded_data = json.load(json_file)
-
-    embedding_space = np.array(loaded_data['embedding_space'])
-    return loaded_data['chunks'], embedding_space
-
-@st.cache_resource
-def load_summary(file_path):
-    with open(file_path, 'r') as file:
-        transcripts = json.load(file)
-    return transcripts
-
-
-def embed_question_openai(texts, model="text-embedding-3-small"):
-    response = client.embeddings.create(
-        input=texts,
-        model=model
-    )
-    return np.array(response.data[0].embedding)
-
-def embed_question(question, embedding_model):
-    if embedding_model == "text-embedding-3-small":
-        return embed_question_openai(question, embedding_model)
-    else:
-        return embedding_model.encode(question, convert_to_numpy=True)
-
-def fixed_knn_retrieval(question_embedding, context_embeddings, top_k=5, min_k=1):
-
-    # Normalize
-    question_embedding = question_embedding / np.linalg.norm(question_embedding)
-    context_embeddings = context_embeddings / np.linalg.norm(context_embeddings, axis=1, keepdims=True)
-
-    # Calculate cosine similarities between the question embedding and all context embeddings.
-    similarities = np.dot(context_embeddings, question_embedding)
-    # Sort the similarities in descending order and get the corresponding indices.
-    sorted_indices = np.argsort(similarities)[::-1]
-    # Select the top_k most similar contexts, ensuring at least min_k contexts are selected.
-    selected_indices = sorted_indices[:max(top_k, min_k)].tolist()
-    return selected_indices
-
-def sec_to_time(start_time):
-    return f"{start_time // 60:02}:{start_time % 60:02}"
-
+
+# from openai import OpenAI
+import random
+# import prompts
+
+from utils.help import get_disclaimer
+from utils.format import sec_to_time, fix_latex, get_youtube_embed
+from utils.rag_utils import load_youtube_data, load_book_data, load_summary, fixed_knn_retrieval
+from utils.system_prompts import get_expert_system_prompt, get_synthesis_system_prompt
+from utils.openai_utils import embed_question_openai, openai_domain_specific_answer_generation, openai_context_integration
+from utils.llama_utils import get_bnb_config, load_base_model, load_fine_tuned_model, generate_response
+
+st.set_page_config(page_title="AI University")
+
 st.markdown("""
 <style>

@@ -92,24 +34,12 @@ st.markdown("""
 </style>
 """, unsafe_allow_html=True)
 
-def get_youtube_embed(video_id, start_time=0, autoplay=0):
-    embed_code = f'''
-    <div class="video-wrapper">
-        <iframe src="https://www.youtube.com/embed/{video_id}?start={start_time}&autoplay={autoplay}&rel=0"
-        frameborder="0" allowfullscreen></iframe>
-    </div>
-    '''
-    return embed_code
 
-
-
-:gray[**Main Data Sources:**] [Introduction to Finite Element Methods (FEM) by Prof. Krishna Garikipati](https://www.youtube.com/playlist?list=PLJhG_d-Sp_JHKVRhfTgDqbic_4MHpltXZ) :gray[and] [The Finite Element Method: Linear Static and Dynamic Finite Element Analysis by Thomas J. R. Hughes](https://www.google.com/books/edition/_/cHH2n_qBK0IC?hl=en).
-
-:gray[**Disclaimer and Copyright Notice:**] :gray[1. AI-Generated Responses: Answers are generated using AI and, while thorough, may not always be 100% accurate. Please verify the information independently. 2. Content Ownership: All video content and lecture material referenced belong to their original creators. We encourage users to view the original material on verified platforms to ensure authenticity and accuracy. 3. Educational Fair Use: This tool is intended solely for educational purposes and operates under the principles of fair use. It is not authorized for commercial applications.]
-
-:gray[For any questions, concerns, or feedback about this application, please contact the development team directly.]
-"""
+# Set the cache directory to persistent storage
+os.environ["HF_HOME"] = "/data/.cache/huggingface"
+
+# # client = OpenAI(api_key=st.secrets["general"]["OpenAI_API"])
+# client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
 # ---------------------------------------
 base_path = "data/"

@@ -208,10 +138,7 @@ with st.sidebar:
 if "question" not in st.session_state:
     st.session_state.question = ""
 
-
-    with open(base_path + "/questions.txt", "r") as file:
-        questions = [line.strip() for line in file]
-    return random.choice(questions)
+
 
 text_area_placeholder = st.empty()
 question_help = "Including details or instructions improves the answer."

@@ -257,10 +184,11 @@ if 'answer' not in st.session_state:
 if 'playing_video_id' not in st.session_state:
     st.session_state.playing_video_id = None
 
+
 if submit_button_placeholder.button("AI Answer", type="primary"):
     if st.session_state.question != "":
         with st.spinner("Finding relevant contexts..."):
-            question_embedding =
+            question_embedding = embed_question_openai(st.session_state.question, model_name)
             initial_max_k = int(0.1 * context_embeddings_YT.shape[0])
             idx_YT = fixed_knn_retrieval(question_embedding, context_embeddings_YT, top_k=top_k_YT, min_k=0)
             idx_Latex = fixed_knn_retrieval(question_embedding, context_embeddings_Latex, top_k=top_k_Latex, min_k=0)

@@ -306,7 +234,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
                 {"role": "user", "content": st.session_state.question}
             ]
 
-
+            expert_answer = generate_response(
                 model=st.session_state.tommi_model,
                 tokenizer=st.session_state.tommi_tokenizer,
                 messages=messages,

@@ -318,23 +246,32 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
                 max_new_tokens=tommi_max_new_tokens
             )
         else:
-
-
+            expert_answer = openai_domain_specific_answer_generation(
+                get_expert_system_prompt(),
                 st.session_state.question,
                 model=model,
                 temperature=expert_temperature,
                 top_p=expert_top_p
             )
+        st.session_state.expert_answer = fix_latex(expert_answer)
     else:
         st.session_state.expert_answer = 'No Expert Answer. Only use the context.'
 
 
-        answer =
+        answer = openai_context_integration(
+            get_synthesis_system_prompt("Finite Element Method"),
+            st.session_state.question,
+            st.session_state.expert_answer,
+            context,
+            model=model,
+            temperature=integration_temperature,
+            top_p=integration_top_p
+        )
 
     if answer.split()[0] == "NOT_ENOUGH_INFO":
         st.markdown("")
         st.markdown("#### Query:")
-        st.markdown(
+        st.markdown(fix_latex(st.session_state.question))
         if show_expert_responce:
             st.markdown("#### Initial Expert Answer:")
             st.markdown(st.session_state.expert_answer)

@@ -342,7 +279,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
         st.write(":smiling_face_with_tear:")
         st.markdown(answer.split('NOT_ENOUGH_INFO')[1])
         st.divider()
-        st.caption(
+        st.caption(get_disclaimer())
         # st.caption("The AI Teaching Assistant project")
         st.session_state.question_answered = False
         st.stop()

@@ -403,4 +340,4 @@ if st.session_state.question_answered:
 
     st.markdown(" ")
     st.divider()
-    st.caption(
+    st.caption(get_disclaimer())
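
Taken together, the hunks above reduce app.py to a thin driver over the new utils modules. The sketch below traces the assumed answer pipeline end to end; it is illustrative only. Names such as model_name, the chunk/overlap sizes, and the chunk "text" field stand in for sidebar settings and data schemas this commit does not show, and it needs the Space's data files plus an OPENAI_API_KEY to actually run.

# Illustrative pipeline sketch (placeholder values for sidebar settings and data schema).
from utils.format import fix_latex
from utils.rag_utils import load_youtube_data, fixed_knn_retrieval
from utils.system_prompts import get_expert_system_prompt, get_synthesis_system_prompt
from utils.openai_utils import (
    embed_question_openai,
    openai_domain_specific_answer_generation,
    openai_context_integration,
)

base_path = "data/"
model_name = "text-embedding-3-small"  # assumed embedding model
chunks_YT, context_embeddings_YT = load_youtube_data(base_path, model_name, 256, 64)  # assumed chunk/overlap sizes

question = "What is a shape function in FEM?"
question_embedding = embed_question_openai(question, model_name)

# Cosine-similarity retrieval over the transcript chunks.
idx_YT = fixed_knn_retrieval(question_embedding, context_embeddings_YT, top_k=5, min_k=0)
context = "\n\n".join(chunks_YT[i]["text"] for i in idx_YT)  # assumed chunk schema

expert_answer = fix_latex(openai_domain_specific_answer_generation(
    get_expert_system_prompt(), question, model="gpt-4o-mini"))

answer = openai_context_integration(
    get_synthesis_system_prompt("Finite Element Method"),
    question, expert_answer, context, model="gpt-4o-mini",
)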
prompts.py DELETED

@@ -1,119 +0,0 @@
-from openai import OpenAI
-import re
-import os
-
-
-client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-
-def fix_latex(text):
-    text = re.sub(r"\\\(", r"$", text)
-    text = re.sub(r"\\\)", r"$", text)
-    text = re.sub(r"\\\[", r"$$", text)
-    text = re.sub(r"\\\]", r"$$", text)
-    return text
-
-
-# Step 1: Domain-Specific Answer Generation
-def openai_domain_specific_answer_generation(subject, question, model="gpt4o-mini", temperature=0.3, top_p=0.1):
-    system_prompt = f"""
-    You are a highly specialized assistant for the subject {subject}. Provide a direct and focused answer to the following question based on your specialized training.
-    """
-
-    prompt = f"""
-    Question:
-    {question}
-
-    Answer (provide a precise, domain-specific response):
-    """
-
-    response = client.chat.completions.create(
-        model=model,
-        messages=[
-            {
-                "role": "system",
-                "content": system_prompt
-            },
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ],
-        temperature=temperature,  # Set low for deterministic and precise responses.
-        top_p=top_p,  # Focus on high-probability outputs to ensure accuracy.
-        frequency_penalty=0.1,  # Reduce repetition of technical terms.
-        presence_penalty=0.0  # Prevent introduction of unrelated ideas.
-    )
-    return fix_latex(response.choices[0].message.content)
-
-
-
-# Step 2: Context Integration
-def openai_context_integration(subject_matter, query, expert_answer, retrieved_context, model="gpt4o-mini", temperature=0.3, top_p=0.3):
-    system_prompt = f"""
-    You are an AI teaching assistant for a {subject_matter} course. Your task is to answer questions based EXCLUSIVELY on the content provided from the professor's teaching materials. Do NOT use any external knowledge or information not present in the given context.
-
-    IMPORTANT: Before proceeding, carefully analyze the provided context and the question. If the context lacks sufficient information to answer the question adequately, respond EXACTLY as follows and then STOP:
-    \"NOT_ENOUGH_INFO The provided context doesn't contain enough information to fully answer this question. You may want to increase the number of relevant context passages or adjust the options and try again.\"
-
-    If the context is sufficient, continue with the remaining guidelines.
-
-    Guidelines:
-    1. Strictly adhere to the information in the context. Do not make assumptions or use general knowledge outside of the provided materials.
-
-    2. For partial answers:
-       a) Provide the information you can based on the context.
-       b) Clearly identify which aspects of the question you cannot address due to limited context.
-
-    3. Referencing:
-       a) Always cite your sources by referencing the video number and the given time in brackets and **bold** (e.g., [**Video 3, time 03:14**]) after each piece of information you use in your answer.
-       b) You may cite multiple references if they discuss the same content (e.g., [**Video 3, time 03:14; Video 1, time 12:04**]). However, try to reference them separately if they cover different aspects of the answer.
-
-    4. Length of response:
-       a) Use approximately 120-200 tokens for each video referenced.
-       b) If referencing multiple videos that discuss the same content, you can use a combined total of 120-200 tokens for all references.
-
-    5. Style and Formatting:
-       a) Provide the answer in markdown format.
-       b) Do not use any titles, sections, or subsections. Use mainly paragraphs. Bold text, items, and bullet points if it helps.
-       c) Symbols and equations within the text MUST be placed between $ and $, e.g., $x=0$ is the min of $\sigma(x)=x^2$.
-       d) For equations between paragraphs, use \n\n$ and $\n\n. For example, in the following equation: \n\n$ E = mc^2 $\n\n, note $c$ as the speed of light.
-
-    6. If multiple interpretations of the question are possible based on the context, acknowledge this and provide answers for each interpretation.
-
-    7. Use technical language appropriate for a {subject_matter} course, but be prepared to explain complex terms if asked.
-
-    8. If the question involves calculations, show your work step-by-step, citing the relevant formulas or methods from the context.
-    """
-
-    prompt = f"""
-    Question:
-    {query}
-
-    Direct Answer:
-    {expert_answer}
-
-    Retrieved Context:
-    {retrieved_context}
-
-    Final Answer:
-
-    """
-
-    response = client.chat.completions.create(
-        model=model,
-        messages=[
-            {
-                "role": "system",
-                "content": system_prompt
-            },
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ],
-        temperature=temperature,  # Maintain some flexibility for smooth blending.
-        top_p=top_p,  # Prioritize high-probability outputs to stay focused on the inputs.
-        frequency_penalty=0.1,  # Allow necessary repetition for clarity.
-        presence_penalty=0.0  # Neutral to avoid introducing unrelated ideas.
-    )
-    return fix_latex(response.choices[0].message.content)
utils/format.py ADDED

@@ -0,0 +1,20 @@
+import re
+
+def sec_to_time(start_time):
+    return f"{start_time // 60:02}:{start_time % 60:02}"
+
+def fix_latex(text):
+    text = re.sub(r"\\\(", r"$", text)
+    text = re.sub(r"\\\)", r"$", text)
+    text = re.sub(r"\\\[", r"$$", text)
+    text = re.sub(r"\\\]", r"$$", text)
+    return text
+
+def get_youtube_embed(video_id, start_time=0, autoplay=0):
+    embed_code = f'''
+    <div class="video-wrapper">
+        <iframe src="https://www.youtube.com/embed/{video_id}?start={start_time}&autoplay={autoplay}&rel=0"
+        frameborder="0" allowfullscreen></iframe>
+    </div>
+    '''
+    return embed_code
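
For reference, a quick illustration of what these helpers produce (the video id is a placeholder):

from utils.format import sec_to_time, fix_latex, get_youtube_embed

print(sec_to_time(754))                         # -> "12:34" (seconds to MM:SS)
print(fix_latex(r"\(E\) from \[ E = mc^2 \]"))
# -> "$E$ from $$ E = mc^2 $$"  (rewrites \( \) and \[ \] delimiters into $ / $$ for st.markdown)
html = get_youtube_embed("VIDEO_ID", start_time=754)  # iframe HTML for st.markdown(..., unsafe_allow_html=True)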
utils/help.py ADDED

@@ -0,0 +1,11 @@
+
+def get_disclaimer():
+    disc = """:gray[AI Teaching Assistant is developed at the University of Southern California by Mostafa Faghih Shojaei, Rahul Gulati, Benjamin Jasperson, Shangshang Wang, Simone Cimolato, Dangli Cao, Willie Neiswanger, and Krishna Garikipati.]
+
+:gray[**Main Data Sources:**] [Introduction to Finite Element Methods (FEM) by Prof. Krishna Garikipati](https://www.youtube.com/playlist?list=PLJhG_d-Sp_JHKVRhfTgDqbic_4MHpltXZ) :gray[and] [The Finite Element Method: Linear Static and Dynamic Finite Element Analysis by Thomas J. R. Hughes](https://www.google.com/books/edition/_/cHH2n_qBK0IC?hl=en).
+
+:gray[**Disclaimer and Copyright Notice:**] :gray[1. AI-Generated Responses: Answers are generated using AI and, while thorough, may not always be 100% accurate. Please verify the information independently. 2. Content Ownership: All video content and lecture material referenced belong to their original creators. We encourage users to view the original material on verified platforms to ensure authenticity and accuracy. 3. Educational Fair Use: This tool is intended solely for educational purposes and operates under the principles of fair use. It is not authorized for commercial applications.]
+
+:gray[For any questions, concerns, or feedback about this application, please contact the development team directly.]
+"""
+    return disc
utils.py → utils/llama_utils.py RENAMED
File without changes
utils/openai_utils.py ADDED

@@ -0,0 +1,79 @@
+import os
+from openai import OpenAI
+
+#--------------------------------------------------------
+# Initialize OpenAI client
+#--------------------------------------------------------
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+def embed_question_openai(texts, model="text-embedding-3-small"):
+    response = client.embeddings.create(
+        input=texts,
+        model=model
+    )
+    return response.data[0].embedding
+
+
+def openai_domain_specific_answer_generation(system_prompt, question, model="gpt4o-mini", temperature=0.3, top_p=0.1):
+
+    prompt = f"""
+    Question:
+    {question}
+
+    Answer (provide a precise, domain-specific response):
+    """
+
+    response = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": prompt
+            }
+        ],
+        temperature=temperature,  # Set low for deterministic and precise responses.
+        top_p=top_p,  # Focus on high-probability outputs to ensure accuracy.
+        frequency_penalty=0.1,  # Reduce repetition of technical terms.
+        presence_penalty=0.0  # Prevent introduction of unrelated ideas.
+    )
+
+    return fix_latex(response.choices[0].message.content)
+
+def openai_context_integration(system_prompt, query, expert_answer, retrieved_context, model="gpt4o-mini", temperature=0.3, top_p=0.3):
+
+    prompt = f"""
+    Question:
+    {query}
+
+    Direct Answer:
+    {expert_answer}
+
+    Retrieved Context:
+    {retrieved_context}
+
+    Final Answer:
+    """
+
+    response = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": prompt
+            }
+        ],
+        temperature=temperature,  # Maintain some flexibility for smooth blending.
+        top_p=top_p,  # Prioritize high-probability outputs to stay focused on the inputs.
+        frequency_penalty=0.1,  # Allow necessary repetition for clarity.
+        presence_penalty=0.0  # Neutral to avoid introducing unrelated ideas.
+    )
+
+    return fix_latex(response.choices[0].message.content)
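
Both chat helpers return fix_latex(...), which now lives in utils/format.py but is not imported at the top of this file, so the module presumably needs a from utils.format import fix_latex to run. A minimal usage sketch under that assumption; OPENAI_API_KEY must be set, and since the default model string "gpt4o-mini" looks like a typo for "gpt-4o-mini", the sketch passes the model explicitly:

# Usage sketch; assumes `from utils.format import fix_latex` inside
# utils/openai_utils.py and a valid OPENAI_API_KEY in the environment.
from utils.openai_utils import (
    embed_question_openai,
    openai_domain_specific_answer_generation,
)

emb = embed_question_openai("What is Galerkin's method?")  # 1536-dim vector for text-embedding-3-small
draft = openai_domain_specific_answer_generation(
    "You are a highly specialized FEM assistant.",          # placeholder system prompt
    "Define the weak form of a boundary value problem.",
    model="gpt-4o-mini",
)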
utils/rag_utils.py ADDED

@@ -0,0 +1,45 @@
+import json
+import numpy as np
+import streamlit as st
+
+@st.cache_resource
+def load_youtube_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
+    embedding_space_file_name = f'{base_path}/yt_embedding_space_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
+    with open(embedding_space_file_name, 'r') as json_file:
+        loaded_data = json.load(json_file)
+
+    embedding_space = np.array(loaded_data['embedding_space'])
+    return loaded_data['chunks'], embedding_space
+
+@st.cache_resource
+def load_book_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
+    embedding_space_file_name = f'{base_path}/latex_embedding_space_by_sections_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
+    with open(embedding_space_file_name, 'r') as json_file:
+        loaded_data = json.load(json_file)
+
+    embedding_space = np.array(loaded_data['embedding_space'])
+    return loaded_data['chunks'], embedding_space
+
+@st.cache_resource
+def load_summary(file_path):
+    with open(file_path, 'r') as file:
+        transcripts = json.load(file)
+    return transcripts
+
+def fixed_knn_retrieval(question_embedding, context_embeddings, top_k=5, min_k=1):
+
+
+    question_embedding = np.array(question_embedding)
+
+    # Normalize
+    question_embedding = question_embedding / np.linalg.norm(question_embedding)
+    context_embeddings = context_embeddings / np.linalg.norm(context_embeddings, axis=1, keepdims=True)
+
+    # Calculate cosine similarities between the question embedding and all context embeddings.
+    similarities = np.dot(context_embeddings, question_embedding)
+    # Sort the similarities in descending order and get the corresponding indices.
+    sorted_indices = np.argsort(similarities)[::-1]
+    # Select the top_k most similar contexts, ensuring at least min_k contexts are selected.
+    selected_indices = sorted_indices[:max(top_k, min_k)].tolist()
+    return selected_indices
+
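
A tiny worked example of fixed_knn_retrieval with made-up vectors; note the function accepts a plain list for the question embedding, since it wraps it in np.array first:

import numpy as np
from utils.rag_utils import fixed_knn_retrieval

context_embeddings = np.array([
    [1.0, 0.0],   # chunk 0
    [0.0, 1.0],   # chunk 1
    [0.7, 0.7],   # chunk 2
])
question_embedding = [1.0, 0.1]  # plain list is fine; wrapped in np.array internally

print(fixed_knn_retrieval(question_embedding, context_embeddings, top_k=2))
# -> [0, 2]: chunk 0 has cosine similarity ~1.0, chunk 2 ~0.77, chunk 1 ~0.10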
utils/system_prompts.py ADDED

@@ -0,0 +1,44 @@
+def get_expert_system_prompt():
+    system_prompt = f"""
+    You are a highly specialized assistant for the subject Finite Element Method (FEM). Provide a direct and focused answer to the following question based on your specialized training.
+    """
+    return system_prompt
+
+
+def get_synthesis_system_prompt(subject_matter="Finite Element Method (FEM)"):
+    system_prompt = f"""
+    You are an AI teaching assistant for a {subject_matter} course. Your task is to answer questions based EXCLUSIVELY on the content provided from the professor's teaching materials. Do NOT use any external knowledge or information not present in the given context.
+
+    IMPORTANT: Before proceeding, carefully analyze the provided context and the question. If the context lacks sufficient information to answer the question adequately, respond EXACTLY as follows and then STOP:
+    \"NOT_ENOUGH_INFO The provided context doesn't contain enough information to fully answer this question. You may want to increase the number of relevant context passages or adjust the options and try again.\"
+
+    If the context is sufficient, continue with the remaining guidelines.
+
+    Guidelines:
+    1. Strictly adhere to the information in the context. Do not make assumptions or use general knowledge outside of the provided materials.
+
+    2. For partial answers:
+       a) Provide the information you can based on the context.
+       b) Clearly identify which aspects of the question you cannot address due to limited context.
+
+    3. Referencing:
+       a) Always cite your sources by referencing the video number and the given time in brackets and **bold** (e.g., [**Video 3, time 03:14**]) after each piece of information you use in your answer.
+       b) You may cite multiple references if they discuss the same content (e.g., [**Video 3, time 03:14; Video 1, time 12:04**]). However, try to reference them separately if they cover different aspects of the answer.
+
+    4. Length of response:
+       a) Use approximately 120-200 tokens for each video referenced.
+       b) If referencing multiple videos that discuss the same content, you can use a combined total of 120-200 tokens for all references.
+
+    5. Style and Formatting:
+       a) Provide the answer in markdown format.
+       b) Do not use any titles, sections, or subsections. Use mainly paragraphs. Bold text, items, and bullet points if it helps.
+       c) Symbols and equations within the text MUST be placed between $ and $, e.g., $x=0$ is the min of $\sigma(x)=x^2$.
+       d) For equations between paragraphs, use \n\n$ and $\n\n. For example, in the following equation: \n\n$ E = mc^2 $\n\n, note $c$ as the speed of light.
+
+    6. If multiple interpretations of the question are possible based on the context, acknowledge this and provide answers for each interpretation.
+
+    7. Use technical language appropriate for a {subject_matter} course, but be prepared to explain complex terms if asked.
+
+    8. If the question involves calculations, show your work step-by-step, citing the relevant formulas or methods from the context.
+    """
+    return system_prompt