Sambhavnoobcoder committed on
Commit 263fd54 · verified · 1 Parent(s): 6ff00a5

Claude updates stuff

Files changed (1)
  1. app.py +15 -65
app.py CHANGED
@@ -63,22 +63,23 @@ def initialize_faiss_index(embeddings):
 # Handle natural language queries
 conversation_history = []

-def handle_query(query, faiss_index, embeddings_texts, model):
-    global conversation_history
+# Global variables
+lecture_notes = fetch_lecture_notes()
+model_architectures = fetch_model_architectures()
+all_texts = lecture_notes + [model_architectures]
+embedding_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
+embeddings = create_embeddings(all_texts, embedding_model)
+faiss_index = initialize_faiss_index(np.array(embeddings))

+def handle_query(query, faiss_index, embeddings_texts, model):
     query_embedding = model.encode([query]).astype('float32')
-
-    # Search FAISS index
-    _, indices = faiss_index.search(query_embedding, 3)  # Retrieve top 3 results
+    _, indices = faiss_index.search(query_embedding, 3)
     relevant_texts = [embeddings_texts[idx] for idx in indices[0]]
-
-    # Combine relevant texts and truncate if necessary
     combined_text = "\n".join([text for text, _ in relevant_texts])
-    max_length = 500  # Adjust as necessary
+    max_length = 500
     if len(combined_text) > max_length:
         combined_text = combined_text[:max_length] + "..."

-    # Generate a response using Gemini
     try:
         response = genai.generate_text(
             model="models/text-bison-001",
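Note: the hunk header above shows that `initialize_faiss_index` is defined in app.py just before this region, but its body is not part of the diff. As a point of reference, a minimal sketch of the usual pattern for such a helper, assuming an exact L2 flat index over float32 SentenceTransformer embeddings, would look like:

import faiss
import numpy as np

def initialize_faiss_index(embeddings: np.ndarray):
    # Exact (brute-force) L2 index sized to the embedding dimension.
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings.astype('float32'))  # FAISS expects float32 input
    return index

# index.search(query_embedding, 3) returns (distances, indices) for the
# top-3 nearest neighbours; the code in the hunk keeps only the indices.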
@@ -87,71 +88,20 @@ def handle_query(query, faiss_index, embeddings_texts, model):
         )
         generated_text = response.result if response else "No response generated."
     except Exception as e:
-        print(f"Error generating text: {e}")
-        generated_text = "An error occurred while generating the response."
-
-    # Update conversation history
-    conversation_history.append(f"User: {query}")
-    conversation_history.append(f"System: {generated_text}")
+        generated_text = f"An error occurred while generating the response: {str(e)}"

-    # Extract sources
     sources = [url for _, url in relevant_texts]
-
     return generated_text, sources

-def generate_concise_response(prompt, context):
-    try:
-        response = genai.generate_text(
-            model="models/text-bison-001",
-            prompt=f"{prompt}\n\nContext: {context}\n\nAnswer:",
-            max_output_tokens=200
-        )
-        return response.result if response else "No response generated."
-    except Exception as e:
-        print(f"Error generating concise response: {e}")
-        return "An error occurred while generating the concise response."
-
-# Main function to execute the pipeline
 def chatbot(message, history):
-    global conversation_history
-
-    lecture_notes = fetch_lecture_notes()
-    model_architectures = fetch_model_architectures()
-
-    all_texts = lecture_notes + [model_architectures]
-
-    # Load the SentenceTransformers model
-    embedding_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
-
-    embeddings = create_embeddings(all_texts, embedding_model)
-
-    # Initialize FAISS index
-    faiss_index = initialize_faiss_index(np.array(embeddings))
-
     response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
-    print("Query:", message)
-    print("Response:", response)
+
     total_text = response if response else "No response generated."
     if sources:
-        print("Sources:", sources)
-        relevant_source = ""
-        for source in sources:
-            relevant_source += source + "\n"
-        total_text += "\n\nSources:\n" + relevant_source
-    else:
-        print("Sources: None of the provided sources were used.")
-    print("----")
-
-    # Generate a concise and relevant summary using Gemini
-    prompt = "Summarize the user queries so far"
-    user_queries_summary = " ".join([message])
-    concise_response = generate_concise_response(prompt, user_queries_summary)
-    print("Concise Response:")
-    print(concise_response)
+        relevant_source = "\n".join(sources)
+        total_text += f"\n\nSources:\n{relevant_source}"

-    # Update conversation history with the new user message and system response
     history.append((message, total_text))
-
     return history

 iface = gr.ChatInterface(
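Note: the tuple unpacking in `combined_text` and `sources` implies that `embeddings_texts` is a list of `(text, url)` pairs. A hedged usage sketch with hypothetical placeholder data (the real pairs come from `fetch_lecture_notes` and `fetch_model_architectures`, which are not shown in this diff):

import faiss
from sentence_transformers import SentenceTransformer

# Hypothetical (text, url) pairs standing in for the fetched lecture notes.
docs = [
    ("Transformers rely on self-attention ...", "https://example.com/lecture1"),
    ("Training corpora are deduplicated ...", "https://example.com/lecture2"),
]
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
emb = model.encode([text for text, _ in docs]).astype('float32')
index = faiss.IndexFlatL2(emb.shape[1])
index.add(emb)
# answer, sources = handle_query("What is self-attention?", index, docs, model)
# `answer` is the generated text (or an error string); `sources` lists the
# URLs of the retrieved passages. The call itself also needs a configured
# genai API key, so it is left commented here.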
@@ -163,11 +113,11 @@ iface = gr.ChatInterface(
         "Explain the transformer architecture.",
         "Tell me about datasets used to train LLMs.",
         "How are LLM training datasets cleaned and preprocessed?",
-        "Summarize the user queries so far"
     ],
     retry_btn="Regenerate",
     undo_btn="Undo",
     clear_btn="Clear",
+    cache_examples=False,  # Disable example caching to avoid file-related errors
 )

 if __name__ == "__main__":
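Note: `retry_btn`, `undo_btn`, and `clear_btn` are `gr.ChatInterface` keyword arguments from the Gradio 3.x/4.x API (they were removed in Gradio 5), and `cache_examples=False` stops Gradio from pre-computing and caching example outputs at startup. A minimal self-contained sketch of such an interface, with a stub in place of the real pipeline; note that `gr.ChatInterface` expects the function to return the reply text and manages the chat history itself:

import gradio as gr

def chatbot(message, history):
    # Stub reply standing in for the retrieval + generation pipeline.
    return f"You said: {message}"

iface = gr.ChatInterface(
    fn=chatbot,
    examples=["Explain the transformer architecture."],
    cache_examples=False,  # skip pre-computing example outputs at startup
)

if __name__ == "__main__":
    iface.launch()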