mostafa-sh committed
Commit 5c2a5a2 · 1 Parent(s): 442d977
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/**/*.json filter=lfs diff=lfs merge=lfs -text
+ data/**/* filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,374 @@
+ import os
+ import json
+ import random
+
+ import numpy as np
+ import streamlit as st
+ from sentence_transformers import SentenceTransformer
+ from openai import OpenAI
+
+ import prompts
+
+ # client = OpenAI(api_key=st.secrets["general"]["OpenAI_API"])
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
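+ # Assumes the OPENAI_API_KEY environment variable is set; the commented line
+ # above shows the st.secrets alternative.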
+
+ st.set_page_config(page_title="The AI Teaching Assistant")
+
+ def load_YT_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
+     # Load the precomputed embedding space for the YouTube transcripts, keyed by
+     # embedding model and chunking settings.
+     embedding_space_file_name = f'{base_path}/yt_embedding_space_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
+     with open(embedding_space_file_name, 'r') as json_file:
+         loaded_data = json.load(json_file)
+
+     embedding_space = np.array(loaded_data['embedding_space'])
+     return loaded_data['chunks'], embedding_space
+
+ def load_Latex_data(base_path, embedding_model_name, chunk_tokens, overlap_tokens):
+     # Load the precomputed embedding space for the textbook (LaTeX), chunked by section.
+     embedding_space_file_name = f'{base_path}/latex_embedding_space_by_sections_{embedding_model_name}_tpc{chunk_tokens}_o{overlap_tokens}.json'
+     with open(embedding_space_file_name, 'r') as json_file:
+         loaded_data = json.load(json_file)
+
+     embedding_space = np.array(loaded_data['embedding_space'])
+     return loaded_data['chunks'], embedding_space
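+ # Each embedding-space JSON is expected to hold two keys: "chunks" (text pieces
+ # with metadata such as 'order', 'text', and 'video_id'/'start' or 'section')
+ # and "embedding_space" (one embedding vector per chunk).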
+
+ @st.cache_resource
+ def load_text_data(json_file_name):
+     with open(json_file_name, 'r') as f:
+         data = json.load(f)
+     return data
+
+
+ @st.cache_resource
+ def load_embeddings(npy_file_name):
+     return np.load(npy_file_name)
+
+ @st.cache_resource
+ def load_model(model_name):
+     return SentenceTransformer(model_name)
+
+ @st.cache_resource
+ def load_summary(file_path):
+     with open(file_path, 'r') as file:
+         transcripts = json.load(file)
+     return transcripts
+
+
+ def embed_question_openai(texts, model="text-embedding-3-small"):
+     # Embed the question through the OpenAI embeddings endpoint; returns a 1-D vector.
+     response = client.embeddings.create(
+         input=texts,
+         model=model
+     )
+     return np.array(response.data[0].embedding)
+
+ def embed_question(question, embedding_model):
+     # `embedding_model` is either an OpenAI model name (a string) or a loaded
+     # SentenceTransformer; both branches return a 1-D numpy embedding.
+     if embedding_model == "text-embedding-3-small":
+         return embed_question_openai(question, embedding_model)
+     else:
+         return embedding_model.encode(question, convert_to_numpy=True)
+
+ def fixed_knn_retrieval(question_embedding, context_embeddings, top_k=5, min_k=1):
+     # Normalize so the dot product below is cosine similarity.
+     question_embedding = question_embedding / np.linalg.norm(question_embedding)
+     context_embeddings = context_embeddings / np.linalg.norm(context_embeddings, axis=1, keepdims=True)
+
+     # Cosine similarities between the question embedding and all context embeddings.
+     similarities = np.dot(context_embeddings, question_embedding)
+     # Sort the similarities in descending order and get the corresponding indices.
+     sorted_indices = np.argsort(similarities)[::-1]
+     # Select the top_k most similar contexts, ensuring at least min_k are selected.
+     selected_indices = sorted_indices[:max(top_k, min_k)].tolist()
+     return selected_indices
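+ # Shape sketch (assuming "text-embedding-3-small", which returns 1536-d vectors):
+ # question_embedding is (1536,), context_embeddings is (N, 1536), and the return
+ # value is the list of indices of the top_k most similar chunks.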
+
+ def sec_to_time(start_time):
+     # Format a time in seconds as MM:SS.
+     return f"{start_time // 60:02}:{start_time % 60:02}"
+
+ st.markdown("""
+ <style>
+ .video-wrapper {
+     position: relative;
+     padding-bottom: 56.25%;
+     height: 0;
+ }
+ .video-wrapper iframe {
+     position: absolute;
+     top: 0;
+     left: 0;
+     width: 100%;
+     height: 100%;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ def get_youtube_embed(video_id, start_time=0, autoplay=0):
+     # Build a responsive YouTube iframe that starts at `start_time` seconds.
+     embed_code = f'''
+     <div class="video-wrapper">
+         <iframe src="https://www.youtube.com/embed/{video_id}?start={start_time}&autoplay={autoplay}&rel=0"
+         frameborder="0" allowfullscreen></iframe>
+     </div>
+     '''
+     return embed_code
+
+
+ disclaimer_contact = """:gray[AI Teaching Assistant is developed at the University of Southern California by Mostafa Faghih Shojaei, Rahul Gulati, Benjamin Jasperson, Shangshang Wang, Simone Cimolato, Dangli Cao, Willie Neiswanger, and Krishna Garikipati.]
+
+ :gray[**Main Data Sources:**] [Introduction to Finite Element Methods (FEM) by Prof. Krishna Garikipati](https://www.youtube.com/playlist?list=PLJhG_d-Sp_JHKVRhfTgDqbic_4MHpltXZ) :gray[and] [The Finite Element Method: Linear Static and Dynamic Finite Element Analysis by Thomas J. R. Hughes](https://www.google.com/books/edition/_/cHH2n_qBK0IC?hl=en).
+
+ :gray[**Disclaimer and Copyright Notice:**] :gray[1. AI-Generated Responses: Answers are generated using AI and, while thorough, may not always be 100% accurate. Please verify the information independently. 2. Content Ownership: All video content and lecture material referenced belong to their original creators. We encourage users to view the original material on verified platforms to ensure authenticity and accuracy. 3. Educational Fair Use: This tool is intended solely for educational purposes and operates under the principles of fair use. It is not authorized for commercial applications.]
+
+ :gray[For any questions, concerns, or feedback about this application, please contact the development team directly.]
+ """
+
+ # ---------------------------------------
+
+ base_path = "data/"
+
+ st.title(":red[AI Teaching Assistant]")
+ # st.markdown("### Finite Element Methods")
+ # st.markdown("### Based on Introduction to Finite Element Methods (FEM) by Prof. Krishna Garikipati")
+ # st.markdown("##### [YouTube playlist of the FEM lectures](https://www.youtube.com/playlist?list=PLJhG_d-Sp_JHKVRhfTgDqbic_4MHpltXZ)")
+
+ st.markdown(":gray[Welcome to] :red[AI Teaching Assistant]:gray[, developed at the] :red[University of Southern California]:gray[. This app leverages AI to provide expert answers to queries related to] :red[Finite Element Methods (FEM)]:gray[.]")
+
+ # As the content is AI-generated, we strongly recommend independently verifying the information provided.
+
+ st.markdown(" ")
+ st.markdown(" ")
+
+ # Sidebar for settings
+ with st.sidebar:
+     st.header("Settings")
+
+     # Embedding model
+     model_name = st.selectbox("Choose content embedding model", [
+         "text-embedding-3-small",
+         # "text-embedding-3-large",
+         # "all-MiniLM-L6-v2",
+         # "all-mpnet-base-v2"
+     ],
+         # help="""
+         # Select the embedding model to use for encoding the retrieved text data.
+         # Options include OpenAI's `text-embedding-3` models and two widely
+         # used SentenceTransformers models.
+         # """
+     )
+
+     with st.container(border=True):
+         st.write('**Video lectures**')
+         yt_token_choice = st.select_slider("Tokens per content piece", [256, 512, 1024], value=256, help="Larger values increase the length of each retrieved piece of content", key="yt_token_len")
+         yt_chunk_tokens = yt_token_choice
+         yt_max_content = {128: 32, 256: 16, 512: 8, 1024: 4}[yt_chunk_tokens]
+         top_k_YT = st.slider("Number of relevant content pieces to retrieve", 0, yt_max_content, 4, key="yt_token_num")
+         yt_overlap_tokens = yt_chunk_tokens // 4
+
+     with st.container(border=True):
+         st.write('**Textbook**')
+         show_textbook = False
+         # show_textbook = st.toggle("Show Textbook Content", value=False)
+         latex_token_choice = st.select_slider("Tokens per content piece", [128, 256, 512, 1024], value=256, help="Larger values increase the length of each retrieved piece of content", key="latex_token_len")
+         latex_chunk_tokens = latex_token_choice
+         latex_max_content = {128: 32, 256: 16, 512: 8, 1024: 4}[latex_chunk_tokens]
+         top_k_Latex = st.slider("Number of relevant content pieces to retrieve", 0, latex_max_content, 4, key="latex_token_num")
+         # latex_overlap_tokens = latex_chunk_tokens // 4
+         latex_overlap_tokens = 0
+
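+     # Video transcripts are chunked with a 25% token overlap, while the textbook
+     # is already split by section, so no overlap is applied there.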
+     st.write(' ')
+     with st.expander('Expert model', expanded=False):
+         use_expert_answer = st.toggle("Use expert answer", value=True)
+         show_expert_response = st.toggle("Show initial expert answer", value=False)
+
+         expert_model = st.selectbox("Choose the LLM model", ["gpt-4o-mini", "gpt-3.5-turbo"], key='a1model')
+
+         expert_temperature = st.slider("Temperature", 0.0, 0.3, 0.2, help="Defines the randomness in the next token prediction. Lower: More predictable and focused. Higher: More adventurous and diverse.", key='a1t')
+
+         expert_top_p = st.slider("Top P", 0.1, 0.3, 0.1, help="Defines the range of token choices the model can consider in the next prediction. Lower: More focused and restricted to high-probability options. Higher: More creative, allowing consideration of less likely options.", key='a1p')
+
+     with st.expander('Synthesis model', expanded=False):
+         synthesis_model = st.selectbox("Choose the LLM model", ["gpt-4o-mini", "gpt-3.5-turbo"], key='a2model')
+
+         integration_temperature = st.slider("Temperature", 0.0, 0.5, 0.3, help="Defines the randomness in the next token prediction. Lower: More predictable and focused. Higher: More adventurous and diverse.", key='a2t')
+
+         integration_top_p = st.slider("Top P", 0.1, 0.5, 0.3, help="Defines the range of token choices the model can consider in the next prediction. Lower: More focused and restricted to high-probability options. Higher: More creative, allowing consideration of less likely options.", key='a2p')
+
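+ # Two-step answering pipeline: the "Expert model" drafts a domain-specific
+ # answer, and the "Synthesis model" grounds that draft in the retrieved lecture
+ # and textbook context (both prompts live in prompts.py).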
+ # Main content area
+ if "question" not in st.session_state:
+     st.session_state.question = ""
+
+ def get_random_question():
+     with open(os.path.join(base_path, "questions.txt"), "r") as file:
+         questions = [line.strip() for line in file]
+     return random.choice(questions)
+
+ text_area_placeholder = st.empty()
+ question_help = "Including details or instructions improves the answer."
+ st.session_state.question = text_area_placeholder.text_area(
+     "**Enter your question/query about the Finite Element Method**",
+     height=120,
+     value=st.session_state.question,
+     help=question_help
+ )
+
+ _, col1, col2, _ = st.columns([4, 2, 4, 3])
+ with col1:
+     submit_button_placeholder = st.empty()
+
+ with col2:
+     if st.button("Random Question"):
+         # Draw until the new question differs from the current one.
+         while True:
+             random_question = get_random_question()
+             if random_question != st.session_state.question:
+                 break
+         st.session_state.question = random_question
+         text_area_placeholder.text_area(
+             "**Enter your question/query about the Finite Element Method**",
+             height=120,
+             value=st.session_state.question,
+             help=question_help
+         )
+
+ # Load YouTube and LaTeX data
+ text_data_YT, context_embeddings_YT = load_YT_data(base_path, model_name, yt_chunk_tokens, yt_overlap_tokens)
+ text_data_Latex, context_embeddings_Latex = load_Latex_data(base_path, model_name, latex_chunk_tokens, latex_overlap_tokens)
+
+ summary = load_summary('data/KG_FEM_summary.json')
+
+ if 'question_answered' not in st.session_state:
+     st.session_state.question_answered = False
+ if 'context_by_video' not in st.session_state:
+     st.session_state.context_by_video = {}
+ if 'context_by_section' not in st.session_state:
+     st.session_state.context_by_section = {}
+ if 'answer' not in st.session_state:
+     st.session_state.answer = ""
+ if 'playing_video_id' not in st.session_state:
+     st.session_state.playing_video_id = None
+
+ if submit_button_placeholder.button("AI Answer", type="primary"):
+     if st.session_state.question != "":
+         with st.spinner("Finding relevant contexts..."):
+             question_embedding = embed_question(st.session_state.question, model_name)
+             idx_YT = fixed_knn_retrieval(question_embedding, context_embeddings_YT, top_k=top_k_YT, min_k=0)
+             idx_Latex = fixed_knn_retrieval(question_embedding, context_embeddings_Latex, top_k=top_k_Latex, min_k=0)
+
+         with st.spinner("Answering the question..."):
+             relevant_contexts_YT = sorted([text_data_YT[i] for i in idx_YT], key=lambda x: x['order'])
+             relevant_contexts_Latex = sorted([text_data_Latex[i] for i in idx_Latex], key=lambda x: x['order'])
+
+             st.session_state.context_by_video = {}
+             for context_item in relevant_contexts_YT:
+                 video_id = context_item['video_id']
+                 if video_id not in st.session_state.context_by_video:
+                     st.session_state.context_by_video[video_id] = []
+                 st.session_state.context_by_video[video_id].append(context_item)
+
+             st.session_state.context_by_section = {}
+             for context_item in relevant_contexts_Latex:
+                 section_id = context_item['section']
+                 if section_id not in st.session_state.context_by_section:
+                     st.session_state.context_by_section[section_id] = []
+                 st.session_state.context_by_section[section_id].append(context_item)
+
+             context = ''
+             for i, (video_id, contexts) in enumerate(st.session_state.context_by_video.items(), start=1):
+                 for context_item in contexts:
+                     start_time = int(context_item['start'])
+                     context += f'Video {i}, time: {sec_to_time(start_time)}: ' + context_item['text'] + '\n\n'
+
+             for i, (section_id, contexts) in enumerate(st.session_state.context_by_section.items(), start=1):
+                 context += f'Section {i} ({section_id}):\n'
+                 for context_item in contexts:
+                     context += context_item['text'] + '\n\n'
+
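+             # The 'Video {i}, time MM:SS' and 'Section {i}' labels written into
+             # the context string are what the synthesis prompt cites back, e.g.
+             # [**Video 3, time 03:14**].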
+             if use_expert_answer:
+                 st.session_state.expert_answer = prompts.openai_domain_specific_answer_generation("Finite Element Method", st.session_state.question, model=expert_model, temperature=expert_temperature, top_p=expert_top_p)
+             else:
+                 st.session_state.expert_answer = 'No Expert Answer. Only use the context.'
+             answer = prompts.openai_context_integration("Finite Element Method", st.session_state.question, st.session_state.expert_answer, context, model=synthesis_model, temperature=integration_temperature, top_p=integration_top_p)
+
+             if answer.strip().startswith("NOT_ENOUGH_INFO"):
+                 st.markdown("")
+                 st.markdown("#### Query:")
+                 st.markdown(prompts.fix_latex(st.session_state.question))
+                 if show_expert_response:
+                     st.markdown("#### Initial Expert Answer:")
+                     st.markdown(st.session_state.expert_answer)
+                 st.markdown("#### Answer:")
+                 st.write(":smiling_face_with_tear:")
+                 st.markdown(answer.split('NOT_ENOUGH_INFO', 1)[1])
+                 st.divider()
+                 st.caption(disclaimer_contact)
+                 st.session_state.question_answered = False
+                 st.stop()
+             else:
+                 st.session_state.answer = answer
+
+         st.session_state.question_answered = True
+
+     else:
+         st.markdown("")
+         st.write("Please enter a question. :smirk:")
+         st.session_state.question_answered = False
+
+ if st.session_state.question_answered:
+     st.markdown("")
+     st.markdown("#### Query:")
+     st.markdown(prompts.fix_latex(st.session_state.question))
+     if show_expert_response:
+         st.markdown("#### Initial Expert Answer:")
+         st.markdown(st.session_state.expert_answer)
+     st.markdown("#### Answer:")
+     st.markdown(st.session_state.answer)
+
+     if top_k_YT > 0:
+         st.markdown("#### Retrieved content in lecture videos")
+         for i, (video_id, contexts) in enumerate(st.session_state.context_by_video.items(), start=1):
+             with st.container(border=True):
+                 st.markdown(f"**Video {i} | {contexts[0]['title']}**")
+                 video_placeholder = st.empty()
+                 video_placeholder.markdown(get_youtube_embed(video_id, 0, 0), unsafe_allow_html=True)
+                 st.markdown('')
+                 with st.container(border=False):
+                     st.markdown("Retrieved Times")
+                     # One narrow column per timestamp button, plus filler space
+                     # (kept positive even when one video contributes many chunks).
+                     cols = st.columns([1] * len(contexts) + [max(1, 9 - len(contexts))])
+                     for j, context_item in enumerate(contexts):
+                         start_time = int(context_item['start'])
+                         label = sec_to_time(start_time)
+                         if cols[j].button(label, key=f"{video_id}_{start_time}"):
+                             if st.session_state.playing_video_id is not None:
+                                 st.session_state.playing_video_id = None
+                             video_placeholder.empty()
+                             video_placeholder.markdown(get_youtube_embed(video_id, start_time, 1), unsafe_allow_html=True)
+                             st.session_state.playing_video_id = video_id
+
+                 with st.expander("Video Summary", expanded=False):
+                     st.markdown(summary[video_id])
+
+     if show_textbook and top_k_Latex > 0:
+         st.markdown("#### Retrieved content in textbook", help="The Finite Element Method: Linear Static and Dynamic Finite Element Analysis")
+         for i, (section_id, contexts) in enumerate(st.session_state.context_by_section.items(), start=1):
+             st.markdown(f"**Section {i} | {section_id}**")
+             for context_item in contexts:
+                 st.markdown(context_item['text'])
+             st.divider()
+
+     st.markdown(" ")
+     st.divider()
+     st.caption(disclaimer_contact)
data/KG_FEM_summary.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9179743956afab296c02eec5b6b82f8bc49a449721c46ddd9634d7c4be4053a
+ size 203883
data/questions.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d23dbdf124c14dfb1c2224708711b4c892b5874f800e7e06240261f229468d61
+ size 456242
data/yt_embedding_space_text-embedding-3-small_tpc1024_o256.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0bb1fc517dfa8eeea1f7545bec577556e7bd170ec885cdf25eab3f5d665d2ba
+ size 17109772
data/yt_embedding_space_text-embedding-3-small_tpc256_o64.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f69373b4df1d9bef5a64a68d898c596fee6007c26a7ac6ff58d56f786c93d60
+ size 62427532
data/yt_embedding_space_text-embedding-3-small_tpc512_o128.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1cc71a26650538650196102342e9f48242eedf5ed2bbf1fb4c2299691edd6c6
+ size 31637503
prompts.py ADDED
@@ -0,0 +1,119 @@
+ import os
+ import re
+
+ from openai import OpenAI
+
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+ def fix_latex(text):
+     # Convert \( \) and \[ \] delimiters into the $ / $$ forms that st.markdown renders.
+     text = re.sub(r"\\\(", r"$", text)
+     text = re.sub(r"\\\)", r"$", text)
+     text = re.sub(r"\\\[", r"$$", text)
+     text = re.sub(r"\\\]", r"$$", text)
+     return text
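+ # For example, fix_latex(r"\(x=0\) is the min of \(\sigma(x)=x^2\)") returns
+ # "$x=0$ is the min of $\sigma(x)=x^2$".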
+
+
+ # Step 1: Domain-Specific Answer Generation
+ def openai_domain_specific_answer_generation(subject, question, model="gpt-4o-mini", temperature=0.3, top_p=0.1):
+     system_prompt = f"""
+     You are a highly specialized assistant for the subject {subject}. Provide a direct and focused answer to the following question based on your specialized training.
+     """
+
+     prompt = f"""
+     Question:
+     {question}
+
+     Answer (provide a precise, domain-specific response):
+     """
+
+     response = client.chat.completions.create(
+         model=model,
+         messages=[
+             {
+                 "role": "system",
+                 "content": system_prompt
+             },
+             {
+                 "role": "user",
+                 "content": prompt
+             }
+         ],
+         temperature=temperature,  # Set low for deterministic and precise responses.
+         top_p=top_p,  # Focus on high-probability outputs to ensure accuracy.
+         frequency_penalty=0.1,  # Reduce repetition of technical terms.
+         presence_penalty=0.0  # Prevent introduction of unrelated ideas.
+     )
+     return fix_latex(response.choices[0].message.content)
+
+
+ # Step 2: Context Integration
+ def openai_context_integration(subject_matter, query, expert_answer, retrieved_context, model="gpt-4o-mini", temperature=0.3, top_p=0.3):
+     system_prompt = f"""
+     You are an AI teaching assistant for a {subject_matter} course. Your task is to answer questions based EXCLUSIVELY on the content provided from the professor's teaching materials. Do NOT use any external knowledge or information not present in the given context.
+
+     IMPORTANT: Before proceeding, carefully analyze the provided context and the question. If the context lacks sufficient information to answer the question adequately, respond EXACTLY as follows and then STOP:
+     "NOT_ENOUGH_INFO The provided context doesn't contain enough information to fully answer this question. You may want to increase the number of relevant context passages or adjust the options and try again."
+
+     If the context is sufficient, continue with the remaining guidelines.
+
+     Guidelines:
+     1. Strictly adhere to the information in the context. Do not make assumptions or use general knowledge outside of the provided materials.
+
+     2. For partial answers:
+     a) Provide the information you can based on the context.
+     b) Clearly identify which aspects of the question you cannot address due to limited context.
+
+     3. Referencing:
+     a) Always cite your sources by referencing the video number and the given time in brackets and **bold** (e.g., [**Video 3, time 03:14**]) after each piece of information you use in your answer.
+     b) You may cite multiple references if they discuss the same content (e.g., [**Video 3, time 03:14; Video 1, time 12:04**]). However, try to reference them separately if they cover different aspects of the answer.
+
+     4. Length of response:
+     a) Use approximately 120-200 tokens for each video referenced.
+     b) If referencing multiple videos that discuss the same content, you can use a combined total of 120-200 tokens for all references.
+
+     5. Style and Formatting:
+     a) Provide the answer in markdown format.
+     b) Do not use any titles, sections, or subsections. Use mainly paragraphs. Bold text, items, and bullet points if it helps.
+     c) Symbols and equations within the text MUST be placed between $ and $, e.g., $x=0$ is the min of $\sigma(x)=x^2$.
+     d) For equations between paragraphs, use \n\n$ and $\n\n. For example, in the following equation: \n\n$ E = mc^2 $\n\n, note $c$ as the speed of light.
+
+     6. If multiple interpretations of the question are possible based on the context, acknowledge this and provide answers for each interpretation.
+
+     7. Use technical language appropriate for a {subject_matter} course, but be prepared to explain complex terms if asked.
+
+     8. If the question involves calculations, show your work step-by-step, citing the relevant formulas or methods from the context.
+     """
+
+     prompt = f"""
+     Question:
+     {query}
+
+     Direct Answer:
+     {expert_answer}
+
+     Retrieved Context:
+     {retrieved_context}
+
+     Final Answer:
+     """
+
+     response = client.chat.completions.create(
+         model=model,
+         messages=[
+             {
+                 "role": "system",
+                 "content": system_prompt
+             },
+             {
+                 "role": "user",
+                 "content": prompt
+             }
+         ],
+         temperature=temperature,  # Maintain some flexibility for smooth blending.
+         top_p=top_p,  # Prioritize high-probability outputs to stay focused on the inputs.
+         frequency_penalty=0.1,  # Allow necessary repetition for clarity.
+         presence_penalty=0.0  # Neutral to avoid introducing unrelated ideas.
+     )
+     return fix_latex(response.choices[0].message.content)
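+
+ # Note: app.py checks the returned text for the NOT_ENOUGH_INFO sentinel at the
+ # start of the answer to decide whether to render it or ask the user to retrieve
+ # more context.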
requirements.txt CHANGED
@@ -1,5 +1,5 @@
  numpy==1.26.3
  openai==1.57.0
  sentence-transformers==2.7.0
- streamlit==1.40.2
- python-dotenv==1.0.1
+ streamlit==1.43.2
+ python-dotenv==1.0.1