mostafa-sh committed
Commit 4a6114a · 1 Parent(s): c1f91db
Files changed (2)
  1. app.py +2 -5
  2. utils/openai_utils.py +2 -2
app.py CHANGED

@@ -6,7 +6,6 @@ import streamlit as st
 
 # from openai import OpenAI
 import random
-# import prompts
 
 from utils.help import get_disclaimer
 from utils.format import sec_to_time, fix_latex, get_youtube_embed
@@ -171,9 +170,6 @@ text_data_YT, context_embeddings_YT = load_youtube_data(base_path, model_name, y
 text_data_Latex, context_embeddings_Latex = load_book_data(base_path, model_name, latex_chunk_tokens, latex_overlap_tokens)
 summary = load_summary('data/KG_FEM_summary.json')
 
-
-
-
 if 'question_answered' not in st.session_state:
     st.session_state.question_answered = False
 if 'context_by_video' not in st.session_state:
@@ -268,6 +264,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
         temperature=integration_temperature,
         top_p=integration_top_p
     )
+    answer = fix_latex(answer)
 
     if answer.split()[0] == "NOT_ENOUGH_INFO":
         st.markdown("")
@@ -297,7 +294,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
 if st.session_state.question_answered:
     st.markdown("")
     st.markdown("#### Query:")
-    st.markdown(prompts.fix_latex(st.session_state.question))
+    st.markdown(fix_latex(st.session_state.question))
     if show_expert_responce:
         st.markdown("#### Initial Expert Answer:")
         st.markdown(st.session_state.expert_answer)
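Taken together, the app.py hunks move LaTeX clean-up to the point where text is rendered: the integrated answer is passed through fix_latex once, right before the NOT_ENOUGH_INFO check, and the displayed query gets the same treatment. A minimal runnable sketch of that call-site pattern follows; the two stubs are assumptions standing in for utils.openai_utils.openai_context_integration and utils.format.fix_latex, whose bodies are not part of this diff.

# Sketch of the post-commit call-site pattern in app.py. Both stubs are
# hypothetical stand-ins; only the placement of fix_latex and the
# NOT_ENOUGH_INFO guard are taken from the diff.

def openai_context_integration(query: str) -> str:
    # Stand-in for utils.openai_utils.openai_context_integration, which
    # after this commit returns the raw model text, unprocessed.
    return r"Galerkin's method enforces \(a(u, v) = (f, v)\) for all test functions \(v\)."

def fix_latex(text: str) -> str:
    # Stand-in for utils.format.fix_latex (see the sketch after the
    # openai_utils.py hunks for one plausible implementation).
    return text.replace(r"\(", "$").replace(r"\)", "$")

answer = openai_context_integration("What is Galerkin's method?")
answer = fix_latex(answer)  # new in this commit: applied once, at the display boundary

if answer.split()[0] == "NOT_ENOUGH_INFO":
    print("")  # app.py emits an empty st.markdown here before falling back
else:
    print(answer)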
utils/openai_utils.py CHANGED

@@ -41,7 +41,7 @@ def openai_domain_specific_answer_generation(system_prompt, question, model="gpt
         presence_penalty=0.0  # Prevent introduction of unrelated ideas.
     )
 
-    return fix_latex(response.choices[0].message.content)
+    return response.choices[0].message.content
 
 def openai_context_integration(system_prompt, query, expert_answer, retrieved_context, model="gpt4o-mini", temperature=0.3, top_p=0.3):
 
@@ -76,4 +76,4 @@ def openai_context_integration(system_prompt, query, expert_answer, retrieved_co
         presence_penalty=0.0  # Neutral to avoid introducing unrelated ideas.
     )
 
-    return fix_latex(response.choices[0].message.content)
+    return response.choices[0].message.content
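Both helpers in utils/openai_utils.py now return response.choices[0].message.content unmodified, leaving LaTeX normalization to the caller. The diff does not show what fix_latex in utils/format.py actually does; as a rough illustration only, a helper like it often rewrites \( .. \) and \[ .. \] delimiters into the $ / $$ forms that st.markdown renders.

import re

def fix_latex(text: str) -> str:
    r"""Hypothetical sketch only -- the real fix_latex lives in
    utils/format.py and its implementation is not shown in this diff.

    Converts \( .. \) and \[ .. \] delimiters to $ .. $ and $$ .. $$,
    a common normalization for LaTeX produced by chat models so that
    Streamlit's markdown renderer picks it up.
    """
    text = re.sub(r"\\\((.+?)\\\)", r"$\1$", text, flags=re.DOTALL)    # inline math
    text = re.sub(r"\\\[(.+?)\\\]", r"$$\1$$", text, flags=re.DOTALL)  # display math
    return text

# Example: raw model output -> markdown-friendly delimiters.
raw = r"The weak form reads \(a(u, v) = (f, v)\) for all \(v\)."
print(fix_latex(raw))  # The weak form reads $a(u, v) = (f, v)$ for all $v$.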