Abs6187 committed on
Commit 2ac4ccc · verified · 1 Parent(s): 2ca8698

Upload 5 files

Files changed (5)
  1. .gitattributes +35 -35
  2. README.md +14 -14
  3. app.py +456 -0
  4. notebook.ipynb +111 -0
  5. requirements.txt +9 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,14 +1,14 @@
- ---
- title: TechMatrix AI Web Search Agent
- emoji: 👀
- colorFrom: blue
- colorTo: green
- sdk: streamlit
- sdk_version: 1.43.2
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: Intelligent web search and response agent
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: TechMatrix AI Web Search Agent
+ emoji: 🔍
+ colorFrom: indigo
+ colorTo: purple
+ sdk: streamlit
+ sdk_version: 1.43.2
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ short_description: Intelligent web search and response agent
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,456 @@
+ import streamlit as st
+ from llama_index.core.agent import ReActAgent
+ from llama_index.llms.groq import Groq
+ from llama_index.core.tools import FunctionTool
+ from llama_index.tools.tavily_research.base import TavilyToolSpec
+ import os
+ import json
+ import pandas as pd
+ from datetime import datetime
+ from dotenv import load_dotenv
+ import time
+ import base64
+ import plotly.graph_objects as go
+ import re
+
+ # Load environment variables
+ load_dotenv()
+
+ # Initialize session state if not already done
+ if 'conversation_history' not in st.session_state:
+     st.session_state.conversation_history = []
+ if 'api_key' not in st.session_state:
+     st.session_state.api_key = ""
+ if 'current_response' not in st.session_state:
+     st.session_state.current_response = None
+ if 'feedback_data' not in st.session_state:
+     st.session_state.feedback_data = []
+ if 'current_sources' not in st.session_state:
+     st.session_state.current_sources = []
+
+ # Custom CSS for better UI
+ st.markdown("""
+ <style>
+     .main-header {
+         font-size: 2.5rem;
+         color: #4527A0;
+         text-align: center;
+         margin-bottom: 1rem;
+         font-weight: bold;
+     }
+     .sub-header {
+         font-size: 1.5rem;
+         color: #5E35B1;
+         margin-bottom: 0.5rem;
+     }
+     .team-header {
+         font-size: 1.2rem;
+         color: #673AB7;
+         font-weight: bold;
+         margin-top: 1rem;
+     }
+     .team-member {
+         font-size: 1rem;
+         margin-left: 1rem;
+         color: #7E57C2;
+     }
+     .api-section {
+         background-color: #EDE7F6;
+         padding: 1rem;
+         border-radius: 10px;
+         margin-bottom: 1rem;
+     }
+     .response-container {
+         background-color: #F3E5F5;
+         padding: 1rem;
+         border-radius: 5px;
+         margin-top: 1rem;
+     }
+     .footer {
+         text-align: center;
+         margin-top: 2rem;
+         font-size: 0.8rem;
+         color: #9575CD;
+     }
+     .error-msg {
+         color: #D32F2F;
+         font-weight: bold;
+     }
+     .success-msg {
+         color: #388E3C;
+         font-weight: bold;
+     }
+     .history-item {
+         padding: 0.5rem;
+         border-radius: 5px;
+         margin-bottom: 0.5rem;
+     }
+     .query-text {
+         font-weight: bold;
+         color: #303F9F;
+     }
+     .response-text {
+         color: #1A237E;
+     }
+     .feedback-container {
+         background-color: #E8EAF6;
+         padding: 1rem;
+         border-radius: 5px;
+         margin-top: 1rem;
+     }
+     .feedback-btn {
+         margin-right: 0.5rem;
+     }
+     .star-rating {
+         display: flex;
+         justify-content: center;
+         margin-top: 0.5rem;
+     }
+     .analytics-container {
+         background-color: #E1F5FE;
+         padding: 1rem;
+         border-radius: 5px;
+         margin-top: 1rem;
+     }
+     .sources-container {
+         background-color: #E0F7FA;
+         padding: 1rem;
+         border-radius: 5px;
+         margin-top: 1rem;
+     }
+     .source-item {
+         background-color: #B2EBF2;
+         padding: 0.5rem;
+         border-radius: 5px;
+         margin-bottom: 0.5rem;
+     }
+     .source-url {
+         font-style: italic;
+         color: #0277BD;
+         word-break: break-all;
+     }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Main title and description
+ st.markdown('<div class="main-header">TechMatrix AI Web Search Agent</div>', unsafe_allow_html=True)
+ st.markdown('''
+ This intelligent agent uses state-of-the-art LLM technology to search the web and provide comprehensive answers to your questions.
+ Simply enter your query, and let our AI handle the rest!
+ ''')
+
+ # Sidebar for team information
+ with st.sidebar:
+     st.markdown('<div class="team-header">TechMatrix Solvers</div>', unsafe_allow_html=True)
+
+     st.markdown('<div class="team-member">👑 Abhay Gupta (Team Leader)</div>', unsafe_allow_html=True)
+     st.markdown('[LinkedIn Profile](https://www.linkedin.com/in/abhay-gupta-197b17264/)')
+
+     st.markdown('<div class="team-member">🧠 Mayank Das Bairagi</div>', unsafe_allow_html=True)
+     st.markdown('[LinkedIn Profile](https://www.linkedin.com/in/mayank-das-bairagi-18639525a/)')
+
+     st.markdown('<div class="team-member">💻 Kripanshu Gupta</div>', unsafe_allow_html=True)
+     st.markdown('[LinkedIn Profile](https://www.linkedin.com/in/kripanshu-gupta-a66349261/)')
+
+     st.markdown('<div class="team-member">🔍 Bhumika Patel</div>', unsafe_allow_html=True)
+     st.markdown('[LinkedIn Profile](https://www.linkedin.com/in/bhumika-patel-ml/)')
+
+     st.markdown('---')
+
+     # Advanced Settings
+     st.markdown('<div class="sub-header">Advanced Settings</div>', unsafe_allow_html=True)
+     model_option = st.selectbox(
+         'LLM Model',
+         ('gemma2-9b-it', 'llama3-8b-8192', 'mixtral-8x7b-32768'),
+         index=0
+     )
+
+     search_depth = st.slider('Search Depth', min_value=1, max_value=5, value=3,
+                              help="Higher values will search more thoroughly but take longer")
+
+     # Clear history button
+     if st.button('Clear Conversation History'):
+         st.session_state.conversation_history = []
+         st.success('Conversation history cleared!')
+
+     # Analytics section in sidebar
+     if st.session_state.feedback_data:
+         st.markdown('---')
+         st.markdown('<div class="sub-header">Response Analytics</div>', unsafe_allow_html=True)
+
+         # Calculate average rating
+         ratings = [item['rating'] for item in st.session_state.feedback_data if 'rating' in item]
+         avg_rating = sum(ratings) / len(ratings) if ratings else 0
+
+         # Create a chart
+         fig = go.Figure(go.Indicator(
+             mode="gauge+number",
+             value=avg_rating,
+             title={'text': "Average Rating"},
+             domain={'x': [0, 1], 'y': [0, 1]},
+             gauge={
+                 'axis': {'range': [0, 5]},
+                 'bar': {'color': "#6200EA"},
+                 'steps': [
+                     {'range': [0, 2], 'color': "#FFD0D0"},
+                     {'range': [2, 3.5], 'color': "#FFFFCC"},
+                     {'range': [3.5, 5], 'color': "#D0FFD0"}
+                 ]
+             }
+         ))
+
+         fig.update_layout(height=250, margin=dict(l=20, r=20, t=30, b=20))
+         st.plotly_chart(fig, use_container_width=True)
+
+         # Show feedback counts
+         feedback_counts = {"👍 Helpful": 0, "👎 Not Helpful": 0}
+         for item in st.session_state.feedback_data:
+             if 'feedback' in item:
+                 if item['feedback'] == 'helpful':
+                     feedback_counts["👍 Helpful"] += 1
+                 elif item['feedback'] == 'not_helpful':
+                     feedback_counts["👎 Not Helpful"] += 1
+
+         st.markdown("### Feedback Summary")
+         for key, value in feedback_counts.items():
+             st.markdown(f"**{key}:** {value}")
+
+ # API key input section
+ st.markdown('<div class="sub-header">API Credentials</div>', unsafe_allow_html=True)
+ with st.expander("Configure API Keys"):
+     st.markdown('<div class="api-section">', unsafe_allow_html=True)
+     api_key = st.text_input("Enter your Groq API key:",
+                             type="password",
+                             value=st.session_state.api_key,
+                             help="Get your API key from https://console.groq.com/keys")
+
+     tavily_key = st.text_input("Enter your Tavily API key (optional):",
+                                type="password",
+                                help="Get your Tavily API key from https://tavily.com/#api")
+
+     if api_key:
+         st.session_state.api_key = api_key
+         os.environ['GROQ_API_KEY'] = api_key
+
+     if tavily_key:
+         os.environ['TAVILY_API_KEY'] = tavily_key
+     st.markdown('</div>', unsafe_allow_html=True)
+
+ # Function to create download link for text data
+ def get_download_link(text, filename, link_text):
+     b64 = base64.b64encode(text.encode()).decode()
+     href = f'<a href="data:file/txt;base64,{b64}" download="{filename}">{link_text}</a>'
+     return href
+
+ # Function to handle feedback submission
+ def submit_feedback(feedback_type, query, response):
+     feedback_entry = {
+         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+         "query": query,
+         "response": response,
+         "feedback": feedback_type
+     }
+     st.session_state.feedback_data.append(feedback_entry)
+     return True
+
+ # Function to submit rating
+ def submit_rating(rating, query, response):
+     # Find if there's an existing entry for this query/response
+     for entry in st.session_state.feedback_data:
+         if entry.get('query') == query and entry.get('response') == response:
+             entry['rating'] = rating
+             return True
+
+     # If not found, create a new entry
+     feedback_entry = {
+         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+         "query": query,
+         "response": response,
+         "rating": rating
+     }
+     st.session_state.feedback_data.append(feedback_entry)
+     return True
+
+ # Function to extract URLs from text
+ def extract_urls(text):
+     url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
+     return re.findall(url_pattern, text)
+
+ # Setup search tools
+ try:
+     if 'TAVILY_API_KEY' in os.environ and os.environ['TAVILY_API_KEY']:
+         search = TavilyToolSpec(api_key=os.environ['TAVILY_API_KEY'])
+     else:
+         # Fallback to a default key or inform the user
+         st.warning("Using default Tavily API key with limited quota. For better results, please provide your own key.")
+         search = TavilyToolSpec(api_key=os.getenv('TAVILY_API_KEY'))
+
+     def search_tool(prompt: str) -> list:
+         """Search the web for information about the given prompt."""
+         try:
+             search_results = search.search(prompt, max_results=search_depth)
+             # Store source URLs
+             sources = []
+             for result in search_results:
+                 if hasattr(result, 'url') and result.url:
+                     sources.append({
+                         'title': result.title if hasattr(result, 'title') else "Unknown Source",
+                         'url': result.url
+                     })
+
+             # Store in session state for later display
+             st.session_state.current_sources = sources
+
+             return [result.text for result in search_results]
+         except Exception as e:
+             return [f"Error during search: {str(e)}"]
+
+     search_toolkit = FunctionTool.from_defaults(fn=search_tool)
+ except Exception as e:
+     st.error(f"Error setting up search tools: {str(e)}")
+     search_toolkit = None
+
+ # Query input
+ query = st.text_input("What would you like to know?",
+                       placeholder="Enter your question here...",
+                       help="Ask any question, and our AI will search the web for answers")
+
+ # Search button
+ search_button = st.button("🔍 Search")
+
+ # Process the search when button is clicked
+ if search_button and query:
+     # Check if API key is provided
+     if not st.session_state.api_key:
+         st.error("Please enter your Groq API key first!")
+     else:
+         try:
+             with st.spinner("🧠 Searching the web and analyzing results..."):
+                 # Initialize the LLM and agent
+                 llm = Groq(model=model_option)
+                 agent = ReActAgent.from_tools([search_toolkit], llm=llm, verbose=True)
+
+                 # Clear current sources before the new search
+                 st.session_state.current_sources = []
+
+                 # Get the response
+                 start_time = time.time()
+                 response = agent.chat(query)
+                 end_time = time.time()
+
+                 # Extract any additional URLs from the response
+                 additional_urls = extract_urls(response.response)
+                 for url in additional_urls:
+                     if not any(source['url'] == url for source in st.session_state.current_sources):
+                         st.session_state.current_sources.append({
+                             'title': "Referenced Source",
+                             'url': url
+                         })
+
+                 # Store the response in session state
+                 st.session_state.current_response = {
+                     "query": query,
+                     "response": response.response,
+                     "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+                     "duration": round(end_time - start_time, 2),
+                     "sources": st.session_state.current_sources
+                 }
+
+                 # Add to conversation history
+                 st.session_state.conversation_history.append(st.session_state.current_response)
+
+                 # Display success message
+                 st.success(f"Found results in {round(end_time - start_time, 2)} seconds!")
+         except Exception as e:
+             st.error(f"An error occurred: {str(e)}")
+
+ # Display current response if available
+ if st.session_state.current_response:
+     with st.container():
+         st.markdown('<div class="response-container">', unsafe_allow_html=True)
+         st.markdown("### Response:")
+         st.write(st.session_state.current_response["response"])
+
+         # Export options
+         col1, col2 = st.columns(2)
+         with col1:
+             st.markdown(
+                 get_download_link(
+                     st.session_state.current_response["response"],
+                     f"search_result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt",
+                     "Download as Text"
+                 ),
+                 unsafe_allow_html=True
+             )
+         with col2:
+             # Create JSON with metadata
+             json_data = json.dumps({
+                 "query": st.session_state.current_response["query"],
+                 "response": st.session_state.current_response["response"],
+                 "timestamp": st.session_state.current_response["time"],
+                 "processing_time": st.session_state.current_response["duration"],
+                 "sources": st.session_state.current_sources if "sources" in st.session_state.current_response else []
+             }, indent=4)
+
+             st.markdown(
+                 get_download_link(
+                     json_data,
+                     f"search_result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
+                     "Download as JSON"
+                 ),
+                 unsafe_allow_html=True
+             )
+         st.markdown('</div>', unsafe_allow_html=True)
+
+         # Display sources if available
+         if "sources" in st.session_state.current_response and st.session_state.current_response["sources"]:
+             with st.expander("View Sources", expanded=True):
+                 st.markdown('<div class="sources-container">', unsafe_allow_html=True)
+                 for i, source in enumerate(st.session_state.current_response["sources"]):
+                     st.markdown(f'<div class="source-item">', unsafe_allow_html=True)
+                     st.markdown(f"**Source {i+1}:** {source.get('title', 'Unknown Source')}")
+                     st.markdown(f'<div class="source-url"><a href="{source["url"]}" target="_blank">{source["url"]}</a></div>', unsafe_allow_html=True)
+                     st.markdown('</div>', unsafe_allow_html=True)
+                 st.markdown('</div>', unsafe_allow_html=True)
+
+         # Feedback section
+         st.markdown('<div class="feedback-container">', unsafe_allow_html=True)
+         st.markdown("### Was this response helpful?")
+
+         col1, col2 = st.columns(2)
+         with col1:
+             if st.button("👍 Helpful", key="helpful_btn"):
+                 if submit_feedback("helpful", st.session_state.current_response["query"], st.session_state.current_response["response"]):
+                     st.success("Thank you for your feedback!")
+         with col2:
+             if st.button("👎 Not Helpful", key="not_helpful_btn"):
+                 if submit_feedback("not_helpful", st.session_state.current_response["query"], st.session_state.current_response["response"]):
+                     st.success("Thank you for your feedback! We'll work to improve our responses.")
+
+         st.markdown("### Rate this response:")
+         rating = st.slider("", min_value=1, max_value=5, value=4,
+                            help="Rate the quality of this response from 1 (poor) to 5 (excellent)")
+
+         if st.button("Submit Rating"):
+             if submit_rating(rating, st.session_state.current_response["query"], st.session_state.current_response["response"]):
+                 st.success("Rating submitted! Thank you for helping us improve.")
+
+         st.markdown('</div>', unsafe_allow_html=True)
+
+ # Display conversation history
+ if st.session_state.conversation_history:
+     with st.expander("View Conversation History"):
+         for i, item in enumerate(reversed(st.session_state.conversation_history)):
+             st.markdown(f'<div class="history-item">', unsafe_allow_html=True)
+             st.markdown(f'<span class="query-text">Q: {item["query"]}</span> <small>({item["time"]})</small>', unsafe_allow_html=True)
+             st.markdown(f'<div class="response-text">A: {item["response"][:200]}{"..." if len(item["response"]) > 200 else ""}</div>', unsafe_allow_html=True)
+             st.markdown('</div>', unsafe_allow_html=True)
+             if i < len(st.session_state.conversation_history) - 1:
+                 st.markdown('---')
+
+ # Footer with attribution
+ st.markdown('''
+ <div class="footer">
+     <p>Powered by Groq + Llama-Index + Tavily Search | Created by TechMatrix Solvers | 2024</p>
+ </div>
+ ''', unsafe_allow_html=True)
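
For orientation: stripped of the Streamlit UI, the agent wiring app.py builds (a Tavily search function wrapped in a `FunctionTool`, handed to a `ReActAgent` backed by a Groq-hosted model) amounts to roughly the minimal sketch below. It assumes `GROQ_API_KEY` and `TAVILY_API_KEY` are already set in the environment and uses an arbitrary placeholder query; notebook.ipynb below exercises essentially the same flow interactively.

```python
import os

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.groq import Groq
from llama_index.tools.tavily_research.base import TavilyToolSpec

# Tavily web-search tool, as in app.py; the key is read from the environment.
search = TavilyToolSpec(api_key=os.environ["TAVILY_API_KEY"])

def search_tool(prompt: str) -> list:
    """Return the text of the top web results for the prompt."""
    results = search.search(prompt, max_results=3)
    return [result.text for result in results]

# ReAct agent over the search tool, backed by Groq (GROQ_API_KEY from the environment).
agent = ReActAgent.from_tools(
    [FunctionTool.from_defaults(fn=search_tool)],
    llm=Groq(model="gemma2-9b-it"),
    verbose=True,
)

# Placeholder query; agent.chat() returns an object whose .response holds the answer text.
print(agent.chat("What is Retrieval-Augmented Generation?").response)
```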
notebook.ipynb ADDED
@@ -0,0 +1,111 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/Users/soumyadip/Library/Python/3.9/lib/python/site-packages/urllib3/__init__.py:35: NotOpenSSLWarning: urllib3 v2 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. See: https://github.com/urllib3/urllib3/issues/3020\n",
+       " warnings.warn(\n"
+      ]
+     },
+     {
+      "data": {
+       "text/plain": [
+        "True"
+       ]
+      },
+      "execution_count": 2,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "from llama_index.core.agent import ReActAgent\n",
+     "from llama_index.llms.groq import Groq\n",
+     "from llama_index.core.llms import ChatMessage\n",
+     "from llama_index.core.tools import BaseTool, FunctionTool\n",
+     "from llama_index.tools.tavily_research.base import TavilyToolSpec\n",
+     "import os\n",
+     "from dotenv import load_dotenv\n",
+     "load_dotenv()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "search = TavilyToolSpec(api_key=os.getenv('TAVILY_API_KEY'))\n",
+     "def search_tool(prompt:str)->list:\n",
+     " \"return only search result from the web\"\n",
+     " results = search.search(prompt)\n",
+     " return [result.text for result in results]\n",
+     "search_toolkit = FunctionTool.from_defaults(fn=search_tool)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "llm = Groq(model = \"gemma2-9b-it\")\n",
+     "agent = ReActAgent.from_tools([search_toolkit],llm=llm,verbose=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "> Running step 72522799-577e-4630-afd1-3f7637988a23. Step input: what is the addition of 25 and 26?\n",
+       "\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer\n",
+       "Answer: 51\n",
+       "\u001b[0m"
+      ]
+     }
+    ],
+    "source": [
+     "response = agent.chat(\"what is the instagram link of Soumyadip Changder?\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.6"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit
+ llama-index
+ llama-index-llms-groq
+ python-dotenv
+ llama_index_tools_tavily_research
+ pandas
+ pybase64
+ requests
+ plotly
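
With these dependencies in place, one rough way to try the Space locally is sketched below; it assumes a local clone of this repo as the working directory, Python 3.9+, and the Groq/Tavily keys either entered in the app's sidebar or placed in a .env file that app.py loads via python-dotenv.

```python
# Rough local-run helper (not part of the Space itself).
import subprocess
import sys

# Install the dependencies listed in requirements.txt above.
subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])

# Launch the entry point declared in README.md (app_file: app.py).
subprocess.check_call([sys.executable, "-m", "streamlit", "run", "app.py"])
```

Equivalently, `pip install -r requirements.txt` followed by `streamlit run app.py` from a shell does the same thing.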