Asman2010 commited on
Commit
f5ed9bf
·
verified ·
1 Parent(s): d207966

Upload 65 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .github/ISSUE_TEMPLATE/bug_report.md +27 -0
  2. .github/ISSUE_TEMPLATE/custom.md +7 -0
  3. .github/ISSUE_TEMPLATE/feature_request.md +19 -0
  4. docs/architecture/README.md +11 -0
  5. docs/architecture/WORKING.md +19 -0
  6. src/Perplexica - Shortcut.lnk +0 -0
  7. src/agents/academicSearchAgent.ts +265 -0
  8. src/agents/imageSearchAgent.ts +84 -0
  9. src/agents/redditSearchAgent.ts +260 -0
  10. src/agents/videoSearchAgent.ts +90 -0
  11. src/agents/webSearchAgent.ts +261 -0
  12. src/agents/wolframAlphaSearchAgent.ts +219 -0
  13. src/agents/writingAssistant.ts +90 -0
  14. src/agents/youtubeSearchAgent.ts +261 -0
  15. src/app.ts +30 -0
  16. src/config.ts +69 -0
  17. src/lib/providers.ts +157 -0
  18. src/lib/searxng.ts +47 -0
  19. src/routes/config.ts +63 -0
  20. src/routes/images.ts +46 -0
  21. src/routes/index.ts +14 -0
  22. src/routes/models.ts +24 -0
  23. src/routes/videos.ts +46 -0
  24. src/utils/computeSimilarity.ts +17 -0
  25. src/utils/formatHistory.ts +9 -0
  26. src/utils/logger.ts +22 -0
  27. src/websocket/connectionManager.ts +86 -0
  28. src/websocket/index.ts +8 -0
  29. src/websocket/messageHandler.ts +109 -0
  30. src/websocket/websocketServer.ts +16 -0
  31. ui/.env.example +2 -0
  32. ui/.eslintrc.json +3 -0
  33. ui/.gitignore +34 -0
  34. ui/.prettierrc.js +11 -0
  35. ui/app/discover/page.tsx +5 -0
  36. ui/app/favicon.ico +0 -0
  37. ui/app/globals.css +13 -0
  38. ui/app/layout.tsx +42 -0
  39. ui/app/page.tsx +17 -0
  40. ui/components/Chat.tsx +87 -0
  41. ui/components/ChatWindow.tsx +293 -0
  42. ui/components/EmptyChat.tsx +26 -0
  43. ui/components/EmptyChatMessageInput.tsx +65 -0
  44. ui/components/Layout.tsx +9 -0
  45. ui/components/MessageActions/Copy.tsx +29 -0
  46. ui/components/MessageActions/Rewrite.tsx +20 -0
  47. ui/components/MessageBox.tsx +145 -0
  48. ui/components/MessageBoxLoading.tsx +11 -0
  49. ui/components/MessageInput.tsx +92 -0
  50. ui/components/MessageInputActions.tsx +180 -0
.github/ISSUE_TEMPLATE/bug_report.md ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: Bug report
3
+ about: Create an issue to help us fix bugs
4
+ title: ''
5
+ labels: bug
6
+ assignees: ''
7
+ ---
8
+
9
+ **Describe the bug**
10
+ A clear and concise description of what the bug is.
11
+
12
+ **To Reproduce**
13
+ Steps to reproduce the behavior:
14
+
15
+ 1. Go to '...'
16
+ 2. Click on '....'
17
+ 3. Scroll down to '....'
18
+ 4. See error
19
+
20
+ **Expected behavior**
21
+ A clear and concise description of what you expected to happen.
22
+
23
+ **Screenshots**
24
+ If applicable, add screenshots to help explain your problem.
25
+
26
+ **Additional context**
27
+ Add any other context about the problem here.
.github/ISSUE_TEMPLATE/custom.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: Custom issue template
3
+ about: Describe this issue template's purpose here.
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+ ---
.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: Feature request
3
+ about: Suggest an idea for this project
4
+ title: ''
5
+ labels: enhancement
6
+ assignees: ''
7
+ ---
8
+
9
+ **Is your feature request related to a problem? Please describe.**
10
+ A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
11
+
12
+ **Describe the solution you'd like**
13
+ A clear and concise description of what you want to happen.
14
+
15
+ **Describe alternatives you've considered**
16
+ A clear and concise description of any alternative solutions or features you've considered.
17
+
18
+ **Additional context**
19
+ Add any other context or screenshots about the feature request here.
docs/architecture/README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Perplexica's Architecture
2
+
3
+ Perplexica's architecture consists of the following key components:
4
+
5
+ 1. **User Interface**: A web-based interface that allows users to interact with Perplexica for searching images, videos, and much more.
6
+ 2. **Agent/Chains**: These components predict Perplexica's next actions, understand user queries, and decide whether a web search is necessary.
7
+ 3. **SearXNG**: A metasearch engine used by Perplexica to search the web for sources.
8
+ 4. **LLMs (Large Language Models)**: Utilized by agents and chains for tasks like understanding content, writing responses, and citing sources. Examples include Claude, GPTs, etc.
9
+ 5. **Embedding Models**: To improve the accuracy of search results, embedding models re-rank the results using similarity search algorithms such as cosine similarity and dot product distance.
10
+
11
+ For a more detailed explanation of how these components work together, see [WORKING.md](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/WORKING.md).
docs/architecture/WORKING.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## How does Perplexica work?
2
+
3
+ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we begin, make sure you've read about the architecture of Perplexica to ensure you understand what it's made up of. Haven't read it? You can read it [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/README.md).
4
+
5
+ We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
6
+
7
+ 1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
8
+ 2. The chain is now invoked; first, the message is passed to another chain where it first predicts (using the chat history and the question) whether there is a need for sources or searching the web. If there is, it will generate a query (in accordance with the chat history) for searching the web that we'll take up later. If not, the chain will end there, and then the answer generator chain, also known as the response generator, will be started.
9
+ 3. The query returned by the first chain is passed to SearXNG to search the web for information.
10
+ 4. The information retrieved at this stage comes from a keyword-based search. We then convert both the information and the query into embeddings and perform a similarity search to find the sources most relevant to answering the query.
11
+ 5. After all this is done, the sources are passed to the response generator. This chain takes all the chat history, the query, and the sources. It generates a response that is streamed to the UI.
12
+
13
+ ### How are the answers cited?
14
+
15
+ The LLMs are prompted to do so. We've prompted them so well that they cite the answers themselves, and using some UI magic, we display it to the user.
16
+
17
+ ### Image and Video Search
18
+
19
+ Image and video searches are conducted in a similar manner. A query is always generated first, then we search the web for images and videos that match the query. These results are then returned to the user.
src/Perplexica - Shortcut.lnk ADDED
Binary file (868 Bytes). View file
 
src/agents/academicSearchAgent.ts ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ PromptTemplate,
4
+ ChatPromptTemplate,
5
+ MessagesPlaceholder,
6
+ } from '@langchain/core/prompts';
7
+ import {
8
+ RunnableSequence,
9
+ RunnableMap,
10
+ RunnableLambda,
11
+ } from '@langchain/core/runnables';
12
+ import { StringOutputParser } from '@langchain/core/output_parsers';
13
+ import { Document } from '@langchain/core/documents';
14
+ import { searchSearxng } from '../lib/searxng';
15
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
16
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
17
+ import type { Embeddings } from '@langchain/core/embeddings';
18
+ import formatChatHistoryAsString from '../utils/formatHistory';
19
+ import eventEmitter from 'events';
20
+ import computeSimilarity from '../utils/computeSimilarity';
21
+ import logger from '../utils/logger';
22
+
23
+ const basicAcademicSearchRetrieverPrompt = `
24
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
25
+ If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
26
+
27
+ Example:
28
+ 1. Follow up question: How does stable diffusion work?
29
+ Rephrased: Stable diffusion working
30
+
31
+ 2. Follow up question: What is linear algebra?
32
+ Rephrased: Linear algebra
33
+
34
+ 3. Follow up question: What is the third law of thermodynamics?
35
+ Rephrased: Third law of thermodynamics
36
+
37
+ Conversation:
38
+ {chat_history}
39
+
40
+ Follow up question: {query}
41
+ Rephrased question:
42
+ `;
43
+
44
+ const basicAcademicSearchResponsePrompt = `
45
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
46
+
47
+ Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
48
+ You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
49
+ You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
50
+ Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
51
+ You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
52
+ Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
53
+ However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
54
+
55
+ Aything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
56
+ talk about the context in your response.
57
+
58
+ <context>
59
+ {context}
60
+ </context>
61
+
62
+ If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
63
+ Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
64
+ `;
65
+
66
+ const strParser = new StringOutputParser();
67
+
68
+ const handleStream = async (
69
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
70
+ emitter: eventEmitter,
71
+ ) => {
72
+ for await (const event of stream) {
73
+ if (
74
+ event.event === 'on_chain_end' &&
75
+ event.name === 'FinalSourceRetriever'
76
+ ) {
77
+ emitter.emit(
78
+ 'data',
79
+ JSON.stringify({ type: 'sources', data: event.data.output }),
80
+ );
81
+ }
82
+ if (
83
+ event.event === 'on_chain_stream' &&
84
+ event.name === 'FinalResponseGenerator'
85
+ ) {
86
+ emitter.emit(
87
+ 'data',
88
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
89
+ );
90
+ }
91
+ if (
92
+ event.event === 'on_chain_end' &&
93
+ event.name === 'FinalResponseGenerator'
94
+ ) {
95
+ emitter.emit('end');
96
+ }
97
+ }
98
+ };
99
+
100
+ type BasicChainInput = {
101
+ chat_history: BaseMessage[];
102
+ query: string;
103
+ };
104
+
105
+ const createBasicAcademicSearchRetrieverChain = (llm: BaseChatModel) => {
106
+ return RunnableSequence.from([
107
+ PromptTemplate.fromTemplate(basicAcademicSearchRetrieverPrompt),
108
+ llm,
109
+ strParser,
110
+ RunnableLambda.from(async (input: string) => {
111
+ if (input === 'not_needed') {
112
+ return { query: '', docs: [] };
113
+ }
114
+
115
+ const res = await searchSearxng(input, {
116
+ language: 'en',
117
+ engines: [
118
+ 'arxiv',
119
+ 'google scholar',
120
+ 'internetarchivescholar',
121
+ 'pubmed',
122
+ ],
123
+ });
124
+
125
+ const documents = res.results.map(
126
+ (result) =>
127
+ new Document({
128
+ pageContent: result.content,
129
+ metadata: {
130
+ title: result.title,
131
+ url: result.url,
132
+ ...(result.img_src && { img_src: result.img_src }),
133
+ },
134
+ }),
135
+ );
136
+
137
+ return { query: input, docs: documents };
138
+ }),
139
+ ]);
140
+ };
141
+
142
+ const createBasicAcademicSearchAnsweringChain = (
143
+ llm: BaseChatModel,
144
+ embeddings: Embeddings,
145
+ ) => {
146
+ const basicAcademicSearchRetrieverChain =
147
+ createBasicAcademicSearchRetrieverChain(llm);
148
+
149
+ const processDocs = async (docs: Document[]) => {
150
+ return docs
151
+ .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
152
+ .join('\n');
153
+ };
154
+
155
+ const rerankDocs = async ({
156
+ query,
157
+ docs,
158
+ }: {
159
+ query: string;
160
+ docs: Document[];
161
+ }) => {
162
+ if (docs.length === 0) {
163
+ return docs;
164
+ }
165
+
166
+ const docsWithContent = docs.filter(
167
+ (doc) => doc.pageContent && doc.pageContent.length > 0,
168
+ );
169
+
170
+ const [docEmbeddings, queryEmbedding] = await Promise.all([
171
+ embeddings.embedDocuments(docsWithContent.map((doc) => doc.pageContent)),
172
+ embeddings.embedQuery(query),
173
+ ]);
174
+
175
+ const similarity = docEmbeddings.map((docEmbedding, i) => {
176
+ const sim = computeSimilarity(queryEmbedding, docEmbedding);
177
+
178
+ return {
179
+ index: i,
180
+ similarity: sim,
181
+ };
182
+ });
183
+
184
+ const sortedDocs = similarity
185
+ .sort((a, b) => b.similarity - a.similarity)
186
+ .slice(0, 15)
187
+ .map((sim) => docsWithContent[sim.index]);
188
+
189
+ return sortedDocs;
190
+ };
191
+
192
+ return RunnableSequence.from([
193
+ RunnableMap.from({
194
+ query: (input: BasicChainInput) => input.query,
195
+ chat_history: (input: BasicChainInput) => input.chat_history,
196
+ context: RunnableSequence.from([
197
+ (input) => ({
198
+ query: input.query,
199
+ chat_history: formatChatHistoryAsString(input.chat_history),
200
+ }),
201
+ basicAcademicSearchRetrieverChain
202
+ .pipe(rerankDocs)
203
+ .withConfig({
204
+ runName: 'FinalSourceRetriever',
205
+ })
206
+ .pipe(processDocs),
207
+ ]),
208
+ }),
209
+ ChatPromptTemplate.fromMessages([
210
+ ['system', basicAcademicSearchResponsePrompt],
211
+ new MessagesPlaceholder('chat_history'),
212
+ ['user', '{query}'],
213
+ ]),
214
+ llm,
215
+ strParser,
216
+ ]).withConfig({
217
+ runName: 'FinalResponseGenerator',
218
+ });
219
+ };
220
+
221
+ const basicAcademicSearch = (
222
+ query: string,
223
+ history: BaseMessage[],
224
+ llm: BaseChatModel,
225
+ embeddings: Embeddings,
226
+ ) => {
227
+ const emitter = new eventEmitter();
228
+
229
+ try {
230
+ const basicAcademicSearchAnsweringChain =
231
+ createBasicAcademicSearchAnsweringChain(llm, embeddings);
232
+
233
+ const stream = basicAcademicSearchAnsweringChain.streamEvents(
234
+ {
235
+ chat_history: history,
236
+ query: query,
237
+ },
238
+ {
239
+ version: 'v1',
240
+ },
241
+ );
242
+
243
+ handleStream(stream, emitter);
244
+ } catch (err) {
245
+ emitter.emit(
246
+ 'error',
247
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
248
+ );
249
+ logger.error(`Error in academic search: ${err}`);
250
+ }
251
+
252
+ return emitter;
253
+ };
254
+
255
+ const handleAcademicSearch = (
256
+ message: string,
257
+ history: BaseMessage[],
258
+ llm: BaseChatModel,
259
+ embeddings: Embeddings,
260
+ ) => {
261
+ const emitter = basicAcademicSearch(message, history, llm, embeddings);
262
+ return emitter;
263
+ };
264
+
265
+ export default handleAcademicSearch;
src/agents/imageSearchAgent.ts ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ RunnableSequence,
3
+ RunnableMap,
4
+ RunnableLambda,
5
+ } from '@langchain/core/runnables';
6
+ import { PromptTemplate } from '@langchain/core/prompts';
7
+ import formatChatHistoryAsString from '../utils/formatHistory';
8
+ import { BaseMessage } from '@langchain/core/messages';
9
+ import { StringOutputParser } from '@langchain/core/output_parsers';
10
+ import { searchSearxng } from '../lib/searxng';
11
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
12
+
13
+ const imageSearchChainPrompt = `
14
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
15
+ You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
16
+
17
+ Example:
18
+ 1. Follow up question: What is a cat?
19
+ Rephrased: A cat
20
+
21
+ 2. Follow up question: What is a car? How does it works?
22
+ Rephrased: Car working
23
+
24
+ 3. Follow up question: How does an AC work?
25
+ Rephrased: AC working
26
+
27
+ Conversation:
28
+ {chat_history}
29
+
30
+ Follow up question: {query}
31
+ Rephrased question:
32
+ `;
33
+
34
+ type ImageSearchChainInput = {
35
+ chat_history: BaseMessage[];
36
+ query: string;
37
+ };
38
+
39
+ const strParser = new StringOutputParser();
40
+
41
+ const createImageSearchChain = (llm: BaseChatModel) => {
42
+ return RunnableSequence.from([
43
+ RunnableMap.from({
44
+ chat_history: (input: ImageSearchChainInput) => {
45
+ return formatChatHistoryAsString(input.chat_history);
46
+ },
47
+ query: (input: ImageSearchChainInput) => {
48
+ return input.query;
49
+ },
50
+ }),
51
+ PromptTemplate.fromTemplate(imageSearchChainPrompt),
52
+ llm,
53
+ strParser,
54
+ RunnableLambda.from(async (input: string) => {
55
+ const res = await searchSearxng(input, {
56
+ engines: ['bing images', 'google images'],
57
+ });
58
+
59
+ const images = [];
60
+
61
+ res.results.forEach((result) => {
62
+ if (result.img_src && result.url && result.title) {
63
+ images.push({
64
+ img_src: result.img_src,
65
+ url: result.url,
66
+ title: result.title,
67
+ });
68
+ }
69
+ });
70
+
71
+ return images.slice(0, 10);
72
+ }),
73
+ ]);
74
+ };
75
+
76
+ const handleImageSearch = (
77
+ input: ImageSearchChainInput,
78
+ llm: BaseChatModel,
79
+ ) => {
80
+ const imageSearchChain = createImageSearchChain(llm);
81
+ return imageSearchChain.invoke(input);
82
+ };
83
+
84
+ export default handleImageSearch;
src/agents/redditSearchAgent.ts ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ PromptTemplate,
4
+ ChatPromptTemplate,
5
+ MessagesPlaceholder,
6
+ } from '@langchain/core/prompts';
7
+ import {
8
+ RunnableSequence,
9
+ RunnableMap,
10
+ RunnableLambda,
11
+ } from '@langchain/core/runnables';
12
+ import { StringOutputParser } from '@langchain/core/output_parsers';
13
+ import { Document } from '@langchain/core/documents';
14
+ import { searchSearxng } from '../lib/searxng';
15
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
16
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
17
+ import type { Embeddings } from '@langchain/core/embeddings';
18
+ import formatChatHistoryAsString from '../utils/formatHistory';
19
+ import eventEmitter from 'events';
20
+ import computeSimilarity from '../utils/computeSimilarity';
21
+ import logger from '../utils/logger';
22
+
23
+ const basicRedditSearchRetrieverPrompt = `
24
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
25
+ If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
26
+
27
+ Example:
28
+ 1. Follow up question: Which company is most likely to create an AGI
29
+ Rephrased: Which company is most likely to create an AGI
30
+
31
+ 2. Follow up question: Is Earth flat?
32
+ Rephrased: Is Earth flat?
33
+
34
+ 3. Follow up question: Is there life on Mars?
35
+ Rephrased: Is there life on Mars?
36
+
37
+ Conversation:
38
+ {chat_history}
39
+
40
+ Follow up question: {query}
41
+ Rephrased question:
42
+ `;
43
+
44
+ const basicRedditSearchResponsePrompt = `
45
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
46
+
47
+ Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
48
+ You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
49
+ You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
50
+ Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
51
+ You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
52
+ Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
53
+ However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
54
+
55
+ Aything inside the following \`context\` HTML block provided below is for your knowledge returned by Reddit and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
56
+ talk about the context in your response.
57
+
58
+ <context>
59
+ {context}
60
+ </context>
61
+
62
+ If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
63
+ Anything between the \`context\` is retrieved from Reddit and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
64
+ `;
65
+
66
+ const strParser = new StringOutputParser();
67
+
68
+ const handleStream = async (
69
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
70
+ emitter: eventEmitter,
71
+ ) => {
72
+ for await (const event of stream) {
73
+ if (
74
+ event.event === 'on_chain_end' &&
75
+ event.name === 'FinalSourceRetriever'
76
+ ) {
77
+ emitter.emit(
78
+ 'data',
79
+ JSON.stringify({ type: 'sources', data: event.data.output }),
80
+ );
81
+ }
82
+ if (
83
+ event.event === 'on_chain_stream' &&
84
+ event.name === 'FinalResponseGenerator'
85
+ ) {
86
+ emitter.emit(
87
+ 'data',
88
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
89
+ );
90
+ }
91
+ if (
92
+ event.event === 'on_chain_end' &&
93
+ event.name === 'FinalResponseGenerator'
94
+ ) {
95
+ emitter.emit('end');
96
+ }
97
+ }
98
+ };
99
+
100
+ type BasicChainInput = {
101
+ chat_history: BaseMessage[];
102
+ query: string;
103
+ };
104
+
105
+ const createBasicRedditSearchRetrieverChain = (llm: BaseChatModel) => {
106
+ return RunnableSequence.from([
107
+ PromptTemplate.fromTemplate(basicRedditSearchRetrieverPrompt),
108
+ llm,
109
+ strParser,
110
+ RunnableLambda.from(async (input: string) => {
111
+ if (input === 'not_needed') {
112
+ return { query: '', docs: [] };
113
+ }
114
+
115
+ const res = await searchSearxng(input, {
116
+ language: 'en',
117
+ engines: ['reddit'],
118
+ });
119
+
120
+ const documents = res.results.map(
121
+ (result) =>
122
+ new Document({
123
+ pageContent: result.content ? result.content : result.title,
124
+ metadata: {
125
+ title: result.title,
126
+ url: result.url,
127
+ ...(result.img_src && { img_src: result.img_src }),
128
+ },
129
+ }),
130
+ );
131
+
132
+ return { query: input, docs: documents };
133
+ }),
134
+ ]);
135
+ };
136
+
137
+ const createBasicRedditSearchAnsweringChain = (
138
+ llm: BaseChatModel,
139
+ embeddings: Embeddings,
140
+ ) => {
141
+ const basicRedditSearchRetrieverChain =
142
+ createBasicRedditSearchRetrieverChain(llm);
143
+
144
+ const processDocs = async (docs: Document[]) => {
145
+ return docs
146
+ .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
147
+ .join('\n');
148
+ };
149
+
150
+ const rerankDocs = async ({
151
+ query,
152
+ docs,
153
+ }: {
154
+ query: string;
155
+ docs: Document[];
156
+ }) => {
157
+ if (docs.length === 0) {
158
+ return docs;
159
+ }
160
+
161
+ const docsWithContent = docs.filter(
162
+ (doc) => doc.pageContent && doc.pageContent.length > 0,
163
+ );
164
+
165
+ const [docEmbeddings, queryEmbedding] = await Promise.all([
166
+ embeddings.embedDocuments(docsWithContent.map((doc) => doc.pageContent)),
167
+ embeddings.embedQuery(query),
168
+ ]);
169
+
170
+ const similarity = docEmbeddings.map((docEmbedding, i) => {
171
+ const sim = computeSimilarity(queryEmbedding, docEmbedding);
172
+
173
+ return {
174
+ index: i,
175
+ similarity: sim,
176
+ };
177
+ });
178
+
179
+ const sortedDocs = similarity
180
+ .sort((a, b) => b.similarity - a.similarity)
181
+ .slice(0, 15)
182
+ .filter((sim) => sim.similarity > 0.3)
183
+ .map((sim) => docsWithContent[sim.index]);
184
+
185
+ return sortedDocs;
186
+ };
187
+
188
+ return RunnableSequence.from([
189
+ RunnableMap.from({
190
+ query: (input: BasicChainInput) => input.query,
191
+ chat_history: (input: BasicChainInput) => input.chat_history,
192
+ context: RunnableSequence.from([
193
+ (input) => ({
194
+ query: input.query,
195
+ chat_history: formatChatHistoryAsString(input.chat_history),
196
+ }),
197
+ basicRedditSearchRetrieverChain
198
+ .pipe(rerankDocs)
199
+ .withConfig({
200
+ runName: 'FinalSourceRetriever',
201
+ })
202
+ .pipe(processDocs),
203
+ ]),
204
+ }),
205
+ ChatPromptTemplate.fromMessages([
206
+ ['system', basicRedditSearchResponsePrompt],
207
+ new MessagesPlaceholder('chat_history'),
208
+ ['user', '{query}'],
209
+ ]),
210
+ llm,
211
+ strParser,
212
+ ]).withConfig({
213
+ runName: 'FinalResponseGenerator',
214
+ });
215
+ };
216
+
217
+ const basicRedditSearch = (
218
+ query: string,
219
+ history: BaseMessage[],
220
+ llm: BaseChatModel,
221
+ embeddings: Embeddings,
222
+ ) => {
223
+ const emitter = new eventEmitter();
224
+
225
+ try {
226
+ const basicRedditSearchAnsweringChain =
227
+ createBasicRedditSearchAnsweringChain(llm, embeddings);
228
+ const stream = basicRedditSearchAnsweringChain.streamEvents(
229
+ {
230
+ chat_history: history,
231
+ query: query,
232
+ },
233
+ {
234
+ version: 'v1',
235
+ },
236
+ );
237
+
238
+ handleStream(stream, emitter);
239
+ } catch (err) {
240
+ emitter.emit(
241
+ 'error',
242
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
243
+ );
244
+ logger.error(`Error in RedditSearch: ${err}`);
245
+ }
246
+
247
+ return emitter;
248
+ };
249
+
250
+ const handleRedditSearch = (
251
+ message: string,
252
+ history: BaseMessage[],
253
+ llm: BaseChatModel,
254
+ embeddings: Embeddings,
255
+ ) => {
256
+ const emitter = basicRedditSearch(message, history, llm, embeddings);
257
+ return emitter;
258
+ };
259
+
260
+ export default handleRedditSearch;
src/agents/videoSearchAgent.ts ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ RunnableSequence,
3
+ RunnableMap,
4
+ RunnableLambda,
5
+ } from '@langchain/core/runnables';
6
+ import { PromptTemplate } from '@langchain/core/prompts';
7
+ import formatChatHistoryAsString from '../utils/formatHistory';
8
+ import { BaseMessage } from '@langchain/core/messages';
9
+ import { StringOutputParser } from '@langchain/core/output_parsers';
10
+ import { searchSearxng } from '../lib/searxng';
11
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
12
+
13
+ const VideoSearchChainPrompt = `
14
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
15
+ You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
16
+
17
+ Example:
18
+ 1. Follow up question: How does a car work?
19
+ Rephrased: How does a car work?
20
+
21
+ 2. Follow up question: What is the theory of relativity?
22
+ Rephrased: What is theory of relativity
23
+
24
+ 3. Follow up question: How does an AC work?
25
+ Rephrased: How does an AC work
26
+
27
+ Conversation:
28
+ {chat_history}
29
+
30
+ Follow up question: {query}
31
+ Rephrased question:
32
+ `;
33
+
34
+ type VideoSearchChainInput = {
35
+ chat_history: BaseMessage[];
36
+ query: string;
37
+ };
38
+
39
+ const strParser = new StringOutputParser();
40
+
41
+ const createVideoSearchChain = (llm: BaseChatModel) => {
42
+ return RunnableSequence.from([
43
+ RunnableMap.from({
44
+ chat_history: (input: VideoSearchChainInput) => {
45
+ return formatChatHistoryAsString(input.chat_history);
46
+ },
47
+ query: (input: VideoSearchChainInput) => {
48
+ return input.query;
49
+ },
50
+ }),
51
+ PromptTemplate.fromTemplate(VideoSearchChainPrompt),
52
+ llm,
53
+ strParser,
54
+ RunnableLambda.from(async (input: string) => {
55
+ const res = await searchSearxng(input, {
56
+ engines: ['youtube'],
57
+ });
58
+
59
+ const videos = [];
60
+
61
+ res.results.forEach((result) => {
62
+ if (
63
+ result.thumbnail &&
64
+ result.url &&
65
+ result.title &&
66
+ result.iframe_src
67
+ ) {
68
+ videos.push({
69
+ img_src: result.thumbnail,
70
+ url: result.url,
71
+ title: result.title,
72
+ iframe_src: result.iframe_src,
73
+ });
74
+ }
75
+ });
76
+
77
+ return videos.slice(0, 10);
78
+ }),
79
+ ]);
80
+ };
81
+
82
+ const handleVideoSearch = (
83
+ input: VideoSearchChainInput,
84
+ llm: BaseChatModel,
85
+ ) => {
86
+ const VideoSearchChain = createVideoSearchChain(llm);
87
+ return VideoSearchChain.invoke(input);
88
+ };
89
+
90
+ export default handleVideoSearch;
src/agents/webSearchAgent.ts ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ PromptTemplate,
4
+ ChatPromptTemplate,
5
+ MessagesPlaceholder,
6
+ } from '@langchain/core/prompts';
7
+ import {
8
+ RunnableSequence,
9
+ RunnableMap,
10
+ RunnableLambda,
11
+ } from '@langchain/core/runnables';
12
+ import { StringOutputParser } from '@langchain/core/output_parsers';
13
+ import { Document } from '@langchain/core/documents';
14
+ import { searchSearxng } from '../lib/searxng';
15
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
16
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
17
+ import type { Embeddings } from '@langchain/core/embeddings';
18
+ import formatChatHistoryAsString from '../utils/formatHistory';
19
+ import eventEmitter from 'events';
20
+ import computeSimilarity from '../utils/computeSimilarity';
21
+ import logger from '../utils/logger';
22
+
23
+ const basicSearchRetrieverPrompt = `
24
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
25
+ If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
26
+
27
+ Example:
28
+ 1. Follow up question: What is the capital of France?
29
+ Rephrased: Capital of france
30
+
31
+ 2. Follow up question: What is the population of New York City?
32
+ Rephrased: Population of New York City
33
+
34
+ 3. Follow up question: What is Docker?
35
+ Rephrased: What is Docker
36
+
37
+ Conversation:
38
+ {chat_history}
39
+
40
+ Follow up question: {query}
41
+ Rephrased question:
42
+ `;
43
+
44
+ const basicWebSearchResponsePrompt = `
45
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries.
46
+
47
+ Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
48
+ You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
49
+ You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
50
+ Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
51
+ You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
52
+ Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
53
+ However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
54
+
55
+ Aything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
56
+ talk about the context in your response.
57
+
58
+ <context>
59
+ {context}
60
+ </context>
61
+
62
+ If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
63
+ Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
64
+ `;
65
+
66
+ const strParser = new StringOutputParser();
67
+
68
+ const handleStream = async (
69
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
70
+ emitter: eventEmitter,
71
+ ) => {
72
+ for await (const event of stream) {
73
+ if (
74
+ event.event === 'on_chain_end' &&
75
+ event.name === 'FinalSourceRetriever'
76
+ ) {
77
+ emitter.emit(
78
+ 'data',
79
+ JSON.stringify({ type: 'sources', data: event.data.output }),
80
+ );
81
+ }
82
+ if (
83
+ event.event === 'on_chain_stream' &&
84
+ event.name === 'FinalResponseGenerator'
85
+ ) {
86
+ emitter.emit(
87
+ 'data',
88
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
89
+ );
90
+ }
91
+ if (
92
+ event.event === 'on_chain_end' &&
93
+ event.name === 'FinalResponseGenerator'
94
+ ) {
95
+ emitter.emit('end');
96
+ }
97
+ }
98
+ };
99
+
100
+ type BasicChainInput = {
101
+ chat_history: BaseMessage[];
102
+ query: string;
103
+ };
104
+
105
+ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
106
+ return RunnableSequence.from([
107
+ PromptTemplate.fromTemplate(basicSearchRetrieverPrompt),
108
+ llm,
109
+ strParser,
110
+ RunnableLambda.from(async (input: string) => {
111
+ if (input === 'not_needed') {
112
+ return { query: '', docs: [] };
113
+ }
114
+
115
+ const res = await searchSearxng(input, {
116
+ language: 'en',
117
+ });
118
+
119
+ const documents = res.results.map(
120
+ (result) =>
121
+ new Document({
122
+ pageContent: result.content,
123
+ metadata: {
124
+ title: result.title,
125
+ url: result.url,
126
+ ...(result.img_src && { img_src: result.img_src }),
127
+ },
128
+ }),
129
+ );
130
+
131
+ return { query: input, docs: documents };
132
+ }),
133
+ ]);
134
+ };
135
+
136
+ const createBasicWebSearchAnsweringChain = (
137
+ llm: BaseChatModel,
138
+ embeddings: Embeddings,
139
+ ) => {
140
+ const basicWebSearchRetrieverChain = createBasicWebSearchRetrieverChain(llm);
141
+
142
+ const processDocs = async (docs: Document[]) => {
143
+ return docs
144
+ .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
145
+ .join('\n');
146
+ };
147
+
148
+ const rerankDocs = async ({
149
+ query,
150
+ docs,
151
+ }: {
152
+ query: string;
153
+ docs: Document[];
154
+ }) => {
155
+ if (docs.length === 0) {
156
+ return docs;
157
+ }
158
+
159
+ const docsWithContent = docs.filter(
160
+ (doc) => doc.pageContent && doc.pageContent.length > 0,
161
+ );
162
+
163
+ const [docEmbeddings, queryEmbedding] = await Promise.all([
164
+ embeddings.embedDocuments(docsWithContent.map((doc) => doc.pageContent)),
165
+ embeddings.embedQuery(query),
166
+ ]);
167
+
168
+ const similarity = docEmbeddings.map((docEmbedding, i) => {
169
+ const sim = computeSimilarity(queryEmbedding, docEmbedding);
170
+
171
+ return {
172
+ index: i,
173
+ similarity: sim,
174
+ };
175
+ });
176
+
177
+ const sortedDocs = similarity
178
+ .sort((a, b) => b.similarity - a.similarity)
179
+ .filter((sim) => sim.similarity > 0.5)
180
+ .slice(0, 15)
181
+ .map((sim) => docsWithContent[sim.index]);
182
+
183
+ return sortedDocs;
184
+ };
185
+
186
+ return RunnableSequence.from([
187
+ RunnableMap.from({
188
+ query: (input: BasicChainInput) => input.query,
189
+ chat_history: (input: BasicChainInput) => input.chat_history,
190
+ context: RunnableSequence.from([
191
+ (input) => ({
192
+ query: input.query,
193
+ chat_history: formatChatHistoryAsString(input.chat_history),
194
+ }),
195
+ basicWebSearchRetrieverChain
196
+ .pipe(rerankDocs)
197
+ .withConfig({
198
+ runName: 'FinalSourceRetriever',
199
+ })
200
+ .pipe(processDocs),
201
+ ]),
202
+ }),
203
+ ChatPromptTemplate.fromMessages([
204
+ ['system', basicWebSearchResponsePrompt],
205
+ new MessagesPlaceholder('chat_history'),
206
+ ['user', '{query}'],
207
+ ]),
208
+ llm,
209
+ strParser,
210
+ ]).withConfig({
211
+ runName: 'FinalResponseGenerator',
212
+ });
213
+ };
214
+
215
+ const basicWebSearch = (
216
+ query: string,
217
+ history: BaseMessage[],
218
+ llm: BaseChatModel,
219
+ embeddings: Embeddings,
220
+ ) => {
221
+ const emitter = new eventEmitter();
222
+
223
+ try {
224
+ const basicWebSearchAnsweringChain = createBasicWebSearchAnsweringChain(
225
+ llm,
226
+ embeddings,
227
+ );
228
+
229
+ const stream = basicWebSearchAnsweringChain.streamEvents(
230
+ {
231
+ chat_history: history,
232
+ query: query,
233
+ },
234
+ {
235
+ version: 'v1',
236
+ },
237
+ );
238
+
239
+ handleStream(stream, emitter);
240
+ } catch (err) {
241
+ emitter.emit(
242
+ 'error',
243
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
244
+ );
245
+ logger.error(`Error in websearch: ${err}`);
246
+ }
247
+
248
+ return emitter;
249
+ };
250
+
251
+ const handleWebSearch = (
252
+ message: string,
253
+ history: BaseMessage[],
254
+ llm: BaseChatModel,
255
+ embeddings: Embeddings,
256
+ ) => {
257
+ const emitter = basicWebSearch(message, history, llm, embeddings);
258
+ return emitter;
259
+ };
260
+
261
+ export default handleWebSearch;
src/agents/wolframAlphaSearchAgent.ts ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ PromptTemplate,
4
+ ChatPromptTemplate,
5
+ MessagesPlaceholder,
6
+ } from '@langchain/core/prompts';
7
+ import {
8
+ RunnableSequence,
9
+ RunnableMap,
10
+ RunnableLambda,
11
+ } from '@langchain/core/runnables';
12
+ import { StringOutputParser } from '@langchain/core/output_parsers';
13
+ import { Document } from '@langchain/core/documents';
14
+ import { searchSearxng } from '../lib/searxng';
15
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
16
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
17
+ import type { Embeddings } from '@langchain/core/embeddings';
18
+ import formatChatHistoryAsString from '../utils/formatHistory';
19
+ import eventEmitter from 'events';
20
+ import logger from '../utils/logger';
21
+
22
+ const basicWolframAlphaSearchRetrieverPrompt = `
23
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
24
+ If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
25
+
26
+ Example:
27
+ 1. Follow up question: What is the atomic radius of S?
28
+ Rephrased: Atomic radius of S
29
+
30
+ 2. Follow up question: What is linear algebra?
31
+ Rephrased: Linear algebra
32
+
33
+ 3. Follow up question: What is the third law of thermodynamics?
34
+ Rephrased: Third law of thermodynamics
35
+
36
+ Conversation:
37
+ {chat_history}
38
+
39
+ Follow up question: {query}
40
+ Rephrased question:
41
+ `;
42
+
43
+ const basicWolframAlphaSearchResponsePrompt = `
44
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
45
+
46
+ Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
47
+ You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
48
+ You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
49
+ Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
50
+ You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
51
+ Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
52
+ However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
53
+
54
+ Aything inside the following \`context\` HTML block provided below is for your knowledge returned by Wolfram Alpha and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
55
+ talk about the context in your response.
56
+
57
+ <context>
58
+ {context}
59
+ </context>
60
+
61
+ If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
62
+ Anything between the \`context\` is retrieved from Wolfram Alpha and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
63
+ `;
64
+
65
+ const strParser = new StringOutputParser();
66
+
67
+ const handleStream = async (
68
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
69
+ emitter: eventEmitter,
70
+ ) => {
71
+ for await (const event of stream) {
72
+ if (
73
+ event.event === 'on_chain_end' &&
74
+ event.name === 'FinalSourceRetriever'
75
+ ) {
76
+ emitter.emit(
77
+ 'data',
78
+ JSON.stringify({ type: 'sources', data: event.data.output }),
79
+ );
80
+ }
81
+ if (
82
+ event.event === 'on_chain_stream' &&
83
+ event.name === 'FinalResponseGenerator'
84
+ ) {
85
+ emitter.emit(
86
+ 'data',
87
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
88
+ );
89
+ }
90
+ if (
91
+ event.event === 'on_chain_end' &&
92
+ event.name === 'FinalResponseGenerator'
93
+ ) {
94
+ emitter.emit('end');
95
+ }
96
+ }
97
+ };
98
+
99
+ type BasicChainInput = {
100
+ chat_history: BaseMessage[];
101
+ query: string;
102
+ };
103
+
104
+ const createBasicWolframAlphaSearchRetrieverChain = (llm: BaseChatModel) => {
105
+ return RunnableSequence.from([
106
+ PromptTemplate.fromTemplate(basicWolframAlphaSearchRetrieverPrompt),
107
+ llm,
108
+ strParser,
109
+ RunnableLambda.from(async (input: string) => {
110
+ if (input === 'not_needed') {
111
+ return { query: '', docs: [] };
112
+ }
113
+
114
+ const res = await searchSearxng(input, {
115
+ language: 'en',
116
+ engines: ['wolframalpha'],
117
+ });
118
+
119
+ const documents = res.results.map(
120
+ (result) =>
121
+ new Document({
122
+ pageContent: result.content,
123
+ metadata: {
124
+ title: result.title,
125
+ url: result.url,
126
+ ...(result.img_src && { img_src: result.img_src }),
127
+ },
128
+ }),
129
+ );
130
+
131
+ return { query: input, docs: documents };
132
+ }),
133
+ ]);
134
+ };
135
+
136
+ const createBasicWolframAlphaSearchAnsweringChain = (llm: BaseChatModel) => {
137
+ const basicWolframAlphaSearchRetrieverChain =
138
+ createBasicWolframAlphaSearchRetrieverChain(llm);
139
+
140
+ const processDocs = (docs: Document[]) => {
141
+ return docs
142
+ .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
143
+ .join('\n');
144
+ };
145
+
146
+ return RunnableSequence.from([
147
+ RunnableMap.from({
148
+ query: (input: BasicChainInput) => input.query,
149
+ chat_history: (input: BasicChainInput) => input.chat_history,
150
+ context: RunnableSequence.from([
151
+ (input) => ({
152
+ query: input.query,
153
+ chat_history: formatChatHistoryAsString(input.chat_history),
154
+ }),
155
+ basicWolframAlphaSearchRetrieverChain
156
+ .pipe(({ query, docs }) => {
157
+ return docs;
158
+ })
159
+ .withConfig({
160
+ runName: 'FinalSourceRetriever',
161
+ })
162
+ .pipe(processDocs),
163
+ ]),
164
+ }),
165
+ ChatPromptTemplate.fromMessages([
166
+ ['system', basicWolframAlphaSearchResponsePrompt],
167
+ new MessagesPlaceholder('chat_history'),
168
+ ['user', '{query}'],
169
+ ]),
170
+ llm,
171
+ strParser,
172
+ ]).withConfig({
173
+ runName: 'FinalResponseGenerator',
174
+ });
175
+ };
176
+
177
+ const basicWolframAlphaSearch = (
178
+ query: string,
179
+ history: BaseMessage[],
180
+ llm: BaseChatModel,
181
+ ) => {
182
+ const emitter = new eventEmitter();
183
+
184
+ try {
185
+ const basicWolframAlphaSearchAnsweringChain =
186
+ createBasicWolframAlphaSearchAnsweringChain(llm);
187
+ const stream = basicWolframAlphaSearchAnsweringChain.streamEvents(
188
+ {
189
+ chat_history: history,
190
+ query: query,
191
+ },
192
+ {
193
+ version: 'v1',
194
+ },
195
+ );
196
+
197
+ handleStream(stream, emitter);
198
+ } catch (err) {
199
+ emitter.emit(
200
+ 'error',
201
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
202
+ );
203
+ logger.error(`Error in WolframAlphaSearch: ${err}`);
204
+ }
205
+
206
+ return emitter;
207
+ };
208
+
209
+ const handleWolframAlphaSearch = (
210
+ message: string,
211
+ history: BaseMessage[],
212
+ llm: BaseChatModel,
213
+ embeddings: Embeddings,
214
+ ) => {
215
+ const emitter = basicWolframAlphaSearch(message, history, llm);
216
+ return emitter;
217
+ };
218
+
219
+ export default handleWolframAlphaSearch;
src/agents/writingAssistant.ts ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ ChatPromptTemplate,
4
+ MessagesPlaceholder,
5
+ } from '@langchain/core/prompts';
6
+ import { RunnableSequence } from '@langchain/core/runnables';
7
+ import { StringOutputParser } from '@langchain/core/output_parsers';
8
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
9
+ import eventEmitter from 'events';
10
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
11
+ import type { Embeddings } from '@langchain/core/embeddings';
12
+ import logger from '../utils/logger';
13
+
14
+ const writingAssistantPrompt = `
15
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
16
+ Since you are a writing assistant, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode.
17
+ `;
18
+
19
+ const strParser = new StringOutputParser();
20
+
21
+ const handleStream = async (
22
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
23
+ emitter: eventEmitter,
24
+ ) => {
25
+ for await (const event of stream) {
26
+ if (
27
+ event.event === 'on_chain_stream' &&
28
+ event.name === 'FinalResponseGenerator'
29
+ ) {
30
+ emitter.emit(
31
+ 'data',
32
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
33
+ );
34
+ }
35
+ if (
36
+ event.event === 'on_chain_end' &&
37
+ event.name === 'FinalResponseGenerator'
38
+ ) {
39
+ emitter.emit('end');
40
+ }
41
+ }
42
+ };
43
+
44
+ const createWritingAssistantChain = (llm: BaseChatModel) => {
45
+ return RunnableSequence.from([
46
+ ChatPromptTemplate.fromMessages([
47
+ ['system', writingAssistantPrompt],
48
+ new MessagesPlaceholder('chat_history'),
49
+ ['user', '{query}'],
50
+ ]),
51
+ llm,
52
+ strParser,
53
+ ]).withConfig({
54
+ runName: 'FinalResponseGenerator',
55
+ });
56
+ };
57
+
58
+ const handleWritingAssistant = (
59
+ query: string,
60
+ history: BaseMessage[],
61
+ llm: BaseChatModel,
62
+ embeddings: Embeddings,
63
+ ) => {
64
+ const emitter = new eventEmitter();
65
+
66
+ try {
67
+ const writingAssistantChain = createWritingAssistantChain(llm);
68
+ const stream = writingAssistantChain.streamEvents(
69
+ {
70
+ chat_history: history,
71
+ query: query,
72
+ },
73
+ {
74
+ version: 'v1',
75
+ },
76
+ );
77
+
78
+ handleStream(stream, emitter);
79
+ } catch (err) {
80
+ emitter.emit(
81
+ 'error',
82
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
83
+ );
84
+ logger.error(`Error in writing assistant: ${err}`);
85
+ }
86
+
87
+ return emitter;
88
+ };
89
+
90
+ export default handleWritingAssistant;
src/agents/youtubeSearchAgent.ts ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+ import {
3
+ PromptTemplate,
4
+ ChatPromptTemplate,
5
+ MessagesPlaceholder,
6
+ } from '@langchain/core/prompts';
7
+ import {
8
+ RunnableSequence,
9
+ RunnableMap,
10
+ RunnableLambda,
11
+ } from '@langchain/core/runnables';
12
+ import { StringOutputParser } from '@langchain/core/output_parsers';
13
+ import { Document } from '@langchain/core/documents';
14
+ import { searchSearxng } from '../lib/searxng';
15
+ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
16
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
17
+ import type { Embeddings } from '@langchain/core/embeddings';
18
+ import formatChatHistoryAsString from '../utils/formatHistory';
19
+ import eventEmitter from 'events';
20
+ import computeSimilarity from '../utils/computeSimilarity';
21
+ import logger from '../utils/logger';
22
+
23
+ const basicYoutubeSearchRetrieverPrompt = `
24
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
25
+ If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
26
+
27
+ Example:
28
+ 1. Follow up question: How does an A.C work?
29
+ Rephrased: A.C working
30
+
31
+ 2. Follow up question: Linear algebra explanation video
32
+ Rephrased: What is linear algebra?
33
+
34
+ 3. Follow up question: What is theory of relativity?
35
+ Rephrased: What is theory of relativity?
36
+
37
+ Conversation:
38
+ {chat_history}
39
+
40
+ Follow up question: {query}
41
+ Rephrased question:
42
+ `;
43
+
44
+ const basicYoutubeSearchResponsePrompt = `
45
+ You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.
46
+
47
+ Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
48
+ You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
49
+ You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
50
+ Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
51
+ You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
52
+ Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
53
+ However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
54
+
55
+ Aything inside the following \`context\` HTML block provided below is for your knowledge returned by Youtube and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
56
+ talk about the context in your response.
57
+
58
+ <context>
59
+ {context}
60
+ </context>
61
+
62
+ If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
63
+ Anything between the \`context\` is retrieved from Youtube and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
64
+ `;
65
+
66
+ const strParser = new StringOutputParser();
67
+
68
+ const handleStream = async (
69
+ stream: AsyncGenerator<StreamEvent, any, unknown>,
70
+ emitter: eventEmitter,
71
+ ) => {
72
+ for await (const event of stream) {
73
+ if (
74
+ event.event === 'on_chain_end' &&
75
+ event.name === 'FinalSourceRetriever'
76
+ ) {
77
+ emitter.emit(
78
+ 'data',
79
+ JSON.stringify({ type: 'sources', data: event.data.output }),
80
+ );
81
+ }
82
+ if (
83
+ event.event === 'on_chain_stream' &&
84
+ event.name === 'FinalResponseGenerator'
85
+ ) {
86
+ emitter.emit(
87
+ 'data',
88
+ JSON.stringify({ type: 'response', data: event.data.chunk }),
89
+ );
90
+ }
91
+ if (
92
+ event.event === 'on_chain_end' &&
93
+ event.name === 'FinalResponseGenerator'
94
+ ) {
95
+ emitter.emit('end');
96
+ }
97
+ }
98
+ };
99
+
100
+ type BasicChainInput = {
101
+ chat_history: BaseMessage[];
102
+ query: string;
103
+ };
104
+
105
+ const createBasicYoutubeSearchRetrieverChain = (llm: BaseChatModel) => {
106
+ return RunnableSequence.from([
107
+ PromptTemplate.fromTemplate(basicYoutubeSearchRetrieverPrompt),
108
+ llm,
109
+ strParser,
110
+ RunnableLambda.from(async (input: string) => {
111
+ if (input === 'not_needed') {
112
+ return { query: '', docs: [] };
113
+ }
114
+
115
+ const res = await searchSearxng(input, {
116
+ language: 'en',
117
+ engines: ['youtube'],
118
+ });
119
+
120
+ const documents = res.results.map(
121
+ (result) =>
122
+ new Document({
123
+ pageContent: result.content ? result.content : result.title,
124
+ metadata: {
125
+ title: result.title,
126
+ url: result.url,
127
+ ...(result.img_src && { img_src: result.img_src }),
128
+ },
129
+ }),
130
+ );
131
+
132
+ return { query: input, docs: documents };
133
+ }),
134
+ ]);
135
+ };
136
+
137
+ const createBasicYoutubeSearchAnsweringChain = (
138
+ llm: BaseChatModel,
139
+ embeddings: Embeddings,
140
+ ) => {
141
+ const basicYoutubeSearchRetrieverChain =
142
+ createBasicYoutubeSearchRetrieverChain(llm);
143
+
144
+ const processDocs = async (docs: Document[]) => {
145
+ return docs
146
+ .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
147
+ .join('\n');
148
+ };
149
+
150
+ const rerankDocs = async ({
151
+ query,
152
+ docs,
153
+ }: {
154
+ query: string;
155
+ docs: Document[];
156
+ }) => {
157
+ if (docs.length === 0) {
158
+ return docs;
159
+ }
160
+
161
+ const docsWithContent = docs.filter(
162
+ (doc) => doc.pageContent && doc.pageContent.length > 0,
163
+ );
164
+
165
+ const [docEmbeddings, queryEmbedding] = await Promise.all([
166
+ embeddings.embedDocuments(docsWithContent.map((doc) => doc.pageContent)),
167
+ embeddings.embedQuery(query),
168
+ ]);
169
+
170
+ const similarity = docEmbeddings.map((docEmbedding, i) => {
171
+ const sim = computeSimilarity(queryEmbedding, docEmbedding);
172
+
173
+ return {
174
+ index: i,
175
+ similarity: sim,
176
+ };
177
+ });
178
+
179
+ const sortedDocs = similarity
180
+ .sort((a, b) => b.similarity - a.similarity)
181
+ .slice(0, 15)
182
+ .filter((sim) => sim.similarity > 0.3)
183
+ .map((sim) => docsWithContent[sim.index]);
184
+
185
+ return sortedDocs;
186
+ };
187
+
188
+ return RunnableSequence.from([
189
+ RunnableMap.from({
190
+ query: (input: BasicChainInput) => input.query,
191
+ chat_history: (input: BasicChainInput) => input.chat_history,
192
+ context: RunnableSequence.from([
193
+ (input) => ({
194
+ query: input.query,
195
+ chat_history: formatChatHistoryAsString(input.chat_history),
196
+ }),
197
+ basicYoutubeSearchRetrieverChain
198
+ .pipe(rerankDocs)
199
+ .withConfig({
200
+ runName: 'FinalSourceRetriever',
201
+ })
202
+ .pipe(processDocs),
203
+ ]),
204
+ }),
205
+ ChatPromptTemplate.fromMessages([
206
+ ['system', basicYoutubeSearchResponsePrompt],
207
+ new MessagesPlaceholder('chat_history'),
208
+ ['user', '{query}'],
209
+ ]),
210
+ llm,
211
+ strParser,
212
+ ]).withConfig({
213
+ runName: 'FinalResponseGenerator',
214
+ });
215
+ };
216
+
217
+ const basicYoutubeSearch = (
218
+ query: string,
219
+ history: BaseMessage[],
220
+ llm: BaseChatModel,
221
+ embeddings: Embeddings,
222
+ ) => {
223
+ const emitter = new eventEmitter();
224
+
225
+ try {
226
+ const basicYoutubeSearchAnsweringChain =
227
+ createBasicYoutubeSearchAnsweringChain(llm, embeddings);
228
+
229
+ const stream = basicYoutubeSearchAnsweringChain.streamEvents(
230
+ {
231
+ chat_history: history,
232
+ query: query,
233
+ },
234
+ {
235
+ version: 'v1',
236
+ },
237
+ );
238
+
239
+ handleStream(stream, emitter);
240
+ } catch (err) {
241
+ emitter.emit(
242
+ 'error',
243
+ JSON.stringify({ data: 'An error has occurred please try again later' }),
244
+ );
245
+ logger.error(`Error in youtube search: ${err}`);
246
+ }
247
+
248
+ return emitter;
249
+ };
250
+
251
+ const handleYoutubeSearch = (
252
+ message: string,
253
+ history: BaseMessage[],
254
+ llm: BaseChatModel,
255
+ embeddings: Embeddings,
256
+ ) => {
257
+ const emitter = basicYoutubeSearch(message, history, llm, embeddings);
258
+ return emitter;
259
+ };
260
+
261
+ export default handleYoutubeSearch;
src/app.ts ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { startWebSocketServer } from './websocket';
2
+ import express from 'express';
3
+ import cors from 'cors';
4
+ import http from 'http';
5
+ import routes from './routes';
6
+ import { getPort } from './config';
7
+ import logger from './utils/logger';
8
+
9
+ const port = getPort();
10
+
11
+ const app = express();
12
+ const server = http.createServer(app);
13
+
14
+ const corsOptions = {
15
+ origin: '*',
16
+ };
17
+
18
+ app.use(cors(corsOptions));
19
+ app.use(express.json());
20
+
21
+ app.use('/api', routes);
22
+ app.get('/api', (_, res) => {
23
+ res.status(200).json({ status: 'ok' });
24
+ });
25
+
26
+ server.listen(port, () => {
27
+ logger.info(`Server is running on port ${port}`);
28
+ });
29
+
30
+ startWebSocketServer(server);
src/config.ts ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fs from 'fs';
2
+ import path from 'path';
3
+ import toml from '@iarna/toml';
4
+
5
+ const configFileName = 'config.toml';
6
+
7
+ interface Config {
8
+ GENERAL: {
9
+ PORT: number;
10
+ SIMILARITY_MEASURE: string;
11
+ };
12
+ API_KEYS: {
13
+ OPENAI: string;
14
+ GROQ: string;
15
+ };
16
+ API_ENDPOINTS: {
17
+ SEARXNG: string;
18
+ OLLAMA: string;
19
+ };
20
+ }
21
+
22
+ type RecursivePartial<T> = {
23
+ [P in keyof T]?: RecursivePartial<T[P]>;
24
+ };
25
+
26
+ const loadConfig = () =>
27
+ toml.parse(
28
+ fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
29
+ ) as any as Config;
30
+
31
+ export const getPort = () => loadConfig().GENERAL.PORT;
32
+
33
+ export const getSimilarityMeasure = () =>
34
+ loadConfig().GENERAL.SIMILARITY_MEASURE;
35
+
36
+ export const getOpenaiApiKey = () => loadConfig().API_KEYS.OPENAI;
37
+
38
+ export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
39
+
40
+ export const getSearxngApiEndpoint = () => loadConfig().API_ENDPOINTS.SEARXNG;
41
+
42
+ export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
43
+
44
+ export const updateConfig = (config: RecursivePartial<Config>) => {
45
+ const currentConfig = loadConfig();
46
+
47
+ for (const key in currentConfig) {
48
+ if (!config[key]) config[key] = {};
49
+
50
+ if (typeof currentConfig[key] === 'object' && currentConfig[key] !== null) {
51
+ for (const nestedKey in currentConfig[key]) {
52
+ if (
53
+ !config[key][nestedKey] &&
54
+ currentConfig[key][nestedKey] &&
55
+ config[key][nestedKey] !== ''
56
+ ) {
57
+ config[key][nestedKey] = currentConfig[key][nestedKey];
58
+ }
59
+ }
60
+ } else if (currentConfig[key] && config[key] !== '') {
61
+ config[key] = currentConfig[key];
62
+ }
63
+ }
64
+
65
+ fs.writeFileSync(
66
+ path.join(__dirname, `../${configFileName}`),
67
+ toml.stringify(config),
68
+ );
69
+ };
src/lib/providers.ts ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
2
+ import { ChatOllama } from '@langchain/community/chat_models/ollama';
3
+ import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
4
+ import {
5
+ getGroqApiKey,
6
+ getOllamaApiEndpoint,
7
+ getOpenaiApiKey,
8
+ } from '../config';
9
+ import logger from '../utils/logger';
10
+
11
+ export const getAvailableChatModelProviders = async () => {
12
+ const openAIApiKey = getOpenaiApiKey();
13
+ const groqApiKey = getGroqApiKey();
14
+ const ollamaEndpoint = getOllamaApiEndpoint();
15
+
16
+ const models = {};
17
+
18
+ if (openAIApiKey) {
19
+ try {
20
+ models['openai'] = {
21
+ 'GPT-3.5 turbo': new ChatOpenAI({
22
+ openAIApiKey,
23
+ modelName: 'gpt-3.5-turbo',
24
+ temperature: 0.7,
25
+ }),
26
+ 'GPT-4': new ChatOpenAI({
27
+ openAIApiKey,
28
+ modelName: 'gpt-4',
29
+ temperature: 0.7,
30
+ }),
31
+ 'GPT-4 turbo': new ChatOpenAI({
32
+ openAIApiKey,
33
+ modelName: 'gpt-4-turbo',
34
+ temperature: 0.7,
35
+ }),
36
+ };
37
+ } catch (err) {
38
+ logger.error(`Error loading OpenAI models: ${err}`);
39
+ }
40
+ }
41
+
42
+ if (groqApiKey) {
43
+ try {
44
+ models['groq'] = {
45
+ 'LLaMA3 8b': new ChatOpenAI(
46
+ {
47
+ openAIApiKey: groqApiKey,
48
+ modelName: 'llama3-8b-8192',
49
+ temperature: 0.7,
50
+ },
51
+ {
52
+ baseURL: 'https://api.groq.com/openai/v1',
53
+ },
54
+ ),
55
+ 'LLaMA3 70b': new ChatOpenAI(
56
+ {
57
+ openAIApiKey: groqApiKey,
58
+ modelName: 'llama3-70b-8192',
59
+ temperature: 0.7,
60
+ },
61
+ {
62
+ baseURL: 'https://api.groq.com/openai/v1',
63
+ },
64
+ ),
65
+ 'Mixtral 8x7b': new ChatOpenAI(
66
+ {
67
+ openAIApiKey: groqApiKey,
68
+ modelName: 'mixtral-8x7b-32768',
69
+ temperature: 0.7,
70
+ },
71
+ {
72
+ baseURL: 'https://api.groq.com/openai/v1',
73
+ },
74
+ ),
75
+ 'Gemma 7b': new ChatOpenAI(
76
+ {
77
+ openAIApiKey: groqApiKey,
78
+ modelName: 'gemma-7b-it',
79
+ temperature: 0.7,
80
+ },
81
+ {
82
+ baseURL: 'https://api.groq.com/openai/v1',
83
+ },
84
+ ),
85
+ };
86
+ } catch (err) {
87
+ logger.error(`Error loading Groq models: ${err}`);
88
+ }
89
+ }
90
+
91
+ if (ollamaEndpoint) {
92
+ try {
93
+ const response = await fetch(`${ollamaEndpoint}/api/tags`);
94
+
95
+ const { models: ollamaModels } = (await response.json()) as any;
96
+
97
+ models['ollama'] = ollamaModels.reduce((acc, model) => {
98
+ acc[model.model] = new ChatOllama({
99
+ baseUrl: ollamaEndpoint,
100
+ model: model.model,
101
+ temperature: 0.7,
102
+ });
103
+ return acc;
104
+ }, {});
105
+ } catch (err) {
106
+ logger.error(`Error loading Ollama models: ${err}`);
107
+ }
108
+ }
109
+
110
+ models['custom_openai'] = {};
111
+
112
+ return models;
113
+ };
114
+
115
+ export const getAvailableEmbeddingModelProviders = async () => {
116
+ const openAIApiKey = getOpenaiApiKey();
117
+ const ollamaEndpoint = getOllamaApiEndpoint();
118
+
119
+ const models = {};
120
+
121
+ if (openAIApiKey) {
122
+ try {
123
+ models['openai'] = {
124
+ 'Text embedding 3 small': new OpenAIEmbeddings({
125
+ openAIApiKey,
126
+ modelName: 'text-embedding-3-small',
127
+ }),
128
+ 'Text embedding 3 large': new OpenAIEmbeddings({
129
+ openAIApiKey,
130
+ modelName: 'text-embedding-3-large',
131
+ }),
132
+ };
133
+ } catch (err) {
134
+ logger.error(`Error loading OpenAI embeddings: ${err}`);
135
+ }
136
+ }
137
+
138
+ if (ollamaEndpoint) {
139
+ try {
140
+ const response = await fetch(`${ollamaEndpoint}/api/tags`);
141
+
142
+ const { models: ollamaModels } = (await response.json()) as any;
143
+
144
+ models['ollama'] = ollamaModels.reduce((acc, model) => {
145
+ acc[model.model] = new OllamaEmbeddings({
146
+ baseUrl: ollamaEndpoint,
147
+ model: model.model,
148
+ });
149
+ return acc;
150
+ }, {});
151
+ } catch (err) {
152
+ logger.error(`Error loading Ollama embeddings: ${err}`);
153
+ }
154
+ }
155
+
156
+ return models;
157
+ };
src/lib/searxng.ts ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import axios from 'axios';
2
+ import { getSearxngApiEndpoint } from '../config';
3
+
4
+ interface SearxngSearchOptions {
5
+ categories?: string[];
6
+ engines?: string[];
7
+ language?: string;
8
+ pageno?: number;
9
+ }
10
+
11
+ interface SearxngSearchResult {
12
+ title: string;
13
+ url: string;
14
+ img_src?: string;
15
+ thumbnail_src?: string;
16
+ thumbnail?: string;
17
+ content?: string;
18
+ author?: string;
19
+ iframe_src?: string;
20
+ }
21
+
22
+ export const searchSearxng = async (
23
+ query: string,
24
+ opts?: SearxngSearchOptions,
25
+ ) => {
26
+ const searxngURL = getSearxngApiEndpoint();
27
+
28
+ const url = new URL(`${searxngURL}/search?format=json`);
29
+ url.searchParams.append('q', query);
30
+
31
+ if (opts) {
32
+ Object.keys(opts).forEach((key) => {
33
+ if (Array.isArray(opts[key])) {
34
+ url.searchParams.append(key, opts[key].join(','));
35
+ return;
36
+ }
37
+ url.searchParams.append(key, opts[key]);
38
+ });
39
+ }
40
+
41
+ const res = await axios.get(url.toString());
42
+
43
+ const results: SearxngSearchResult[] = res.data.results;
44
+ const suggestions: string[] = res.data.suggestions;
45
+
46
+ return { results, suggestions };
47
+ };
src/routes/config.ts ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import {
3
+ getAvailableChatModelProviders,
4
+ getAvailableEmbeddingModelProviders,
5
+ } from '../lib/providers';
6
+ import {
7
+ getGroqApiKey,
8
+ getOllamaApiEndpoint,
9
+ getOpenaiApiKey,
10
+ updateConfig,
11
+ } from '../config';
12
+
13
+ const router = express.Router();
14
+
15
+ router.get('/', async (_, res) => {
16
+ const config = {};
17
+
18
+ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
19
+ getAvailableChatModelProviders(),
20
+ getAvailableEmbeddingModelProviders(),
21
+ ]);
22
+
23
+ config['chatModelProviders'] = {};
24
+ config['embeddingModelProviders'] = {};
25
+
26
+ for (const provider in chatModelProviders) {
27
+ config['chatModelProviders'][provider] = Object.keys(
28
+ chatModelProviders[provider],
29
+ );
30
+ }
31
+
32
+ for (const provider in embeddingModelProviders) {
33
+ config['embeddingModelProviders'][provider] = Object.keys(
34
+ embeddingModelProviders[provider],
35
+ );
36
+ }
37
+
38
+ config['openaiApiKey'] = getOpenaiApiKey();
39
+ config['ollamaApiUrl'] = getOllamaApiEndpoint();
40
+ config['groqApiKey'] = getGroqApiKey();
41
+
42
+ res.status(200).json(config);
43
+ });
44
+
45
+ router.post('/', async (req, res) => {
46
+ const config = req.body;
47
+
48
+ const updatedConfig = {
49
+ API_KEYS: {
50
+ OPENAI: config.openaiApiKey,
51
+ GROQ: config.groqApiKey,
52
+ },
53
+ API_ENDPOINTS: {
54
+ OLLAMA: config.ollamaApiUrl,
55
+ },
56
+ };
57
+
58
+ updateConfig(updatedConfig);
59
+
60
+ res.status(200).json({ message: 'Config updated' });
61
+ });
62
+
63
+ export default router;
src/routes/images.ts ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import handleImageSearch from '../agents/imageSearchAgent';
3
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
+ import { getAvailableChatModelProviders } from '../lib/providers';
5
+ import { HumanMessage, AIMessage } from '@langchain/core/messages';
6
+ import logger from '../utils/logger';
7
+
8
+ const router = express.Router();
9
+
10
+ router.post('/', async (req, res) => {
11
+ try {
12
+ let { query, chat_history, chat_model_provider, chat_model } = req.body;
13
+
14
+ chat_history = chat_history.map((msg: any) => {
15
+ if (msg.role === 'user') {
16
+ return new HumanMessage(msg.content);
17
+ } else if (msg.role === 'assistant') {
18
+ return new AIMessage(msg.content);
19
+ }
20
+ });
21
+
22
+ const chatModels = await getAvailableChatModelProviders();
23
+ const provider = chat_model_provider || Object.keys(chatModels)[0];
24
+ const chatModel = chat_model || Object.keys(chatModels[provider])[0];
25
+
26
+ let llm: BaseChatModel | undefined;
27
+
28
+ if (chatModels[provider] && chatModels[provider][chatModel]) {
29
+ llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
30
+ }
31
+
32
+ if (!llm) {
33
+ res.status(500).json({ message: 'Invalid LLM model selected' });
34
+ return;
35
+ }
36
+
37
+ const images = await handleImageSearch({ query, chat_history }, llm);
38
+
39
+ res.status(200).json({ images });
40
+ } catch (err) {
41
+ res.status(500).json({ message: 'An error has occurred.' });
42
+ logger.error(`Error in image search: ${err.message}`);
43
+ }
44
+ });
45
+
46
+ export default router;
src/routes/index.ts ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import imagesRouter from './images';
3
+ import videosRouter from './videos';
4
+ import configRouter from './config';
5
+ import modelsRouter from './models';
6
+
7
+ const router = express.Router();
8
+
9
+ router.use('/images', imagesRouter);
10
+ router.use('/videos', videosRouter);
11
+ router.use('/config', configRouter);
12
+ router.use('/models', modelsRouter);
13
+
14
+ export default router;
src/routes/models.ts ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import logger from '../utils/logger';
3
+ import {
4
+ getAvailableChatModelProviders,
5
+ getAvailableEmbeddingModelProviders,
6
+ } from '../lib/providers';
7
+
8
+ const router = express.Router();
9
+
10
+ router.get('/', async (req, res) => {
11
+ try {
12
+ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
13
+ getAvailableChatModelProviders(),
14
+ getAvailableEmbeddingModelProviders(),
15
+ ]);
16
+
17
+ res.status(200).json({ chatModelProviders, embeddingModelProviders });
18
+ } catch (err) {
19
+ res.status(500).json({ message: 'An error has occurred.' });
20
+ logger.error(err.message);
21
+ }
22
+ });
23
+
24
+ export default router;
src/routes/videos.ts ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import express from 'express';
2
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
+ import { getAvailableChatModelProviders } from '../lib/providers';
4
+ import { HumanMessage, AIMessage } from '@langchain/core/messages';
5
+ import logger from '../utils/logger';
6
+ import handleVideoSearch from '../agents/videoSearchAgent';
7
+
8
+ const router = express.Router();
9
+
10
+ router.post('/', async (req, res) => {
11
+ try {
12
+ let { query, chat_history, chat_model_provider, chat_model } = req.body;
13
+
14
+ chat_history = chat_history.map((msg: any) => {
15
+ if (msg.role === 'user') {
16
+ return new HumanMessage(msg.content);
17
+ } else if (msg.role === 'assistant') {
18
+ return new AIMessage(msg.content);
19
+ }
20
+ });
21
+
22
+ const chatModels = await getAvailableChatModelProviders();
23
+ const provider = chat_model_provider || Object.keys(chatModels)[0];
24
+ const chatModel = chat_model || Object.keys(chatModels[provider])[0];
25
+
26
+ let llm: BaseChatModel | undefined;
27
+
28
+ if (chatModels[provider] && chatModels[provider][chatModel]) {
29
+ llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
30
+ }
31
+
32
+ if (!llm) {
33
+ res.status(500).json({ message: 'Invalid LLM model selected' });
34
+ return;
35
+ }
36
+
37
+ const videos = await handleVideoSearch({ chat_history, query }, llm);
38
+
39
+ res.status(200).json({ videos });
40
+ } catch (err) {
41
+ res.status(500).json({ message: 'An error has occurred.' });
42
+ logger.error(`Error in video search: ${err.message}`);
43
+ }
44
+ });
45
+
46
+ export default router;
src/utils/computeSimilarity.ts ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dot from 'compute-dot';
2
+ import cosineSimilarity from 'compute-cosine-similarity';
3
+ import { getSimilarityMeasure } from '../config';
4
+
5
+ const computeSimilarity = (x: number[], y: number[]): number => {
6
+ const similarityMeasure = getSimilarityMeasure();
7
+
8
+ if (similarityMeasure === 'cosine') {
9
+ return cosineSimilarity(x, y);
10
+ } else if (similarityMeasure === 'dot') {
11
+ return dot(x, y);
12
+ }
13
+
14
+ throw new Error('Invalid similarity measure');
15
+ };
16
+
17
+ export default computeSimilarity;
src/utils/formatHistory.ts ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import { BaseMessage } from '@langchain/core/messages';
2
+
3
+ const formatChatHistoryAsString = (history: BaseMessage[]) => {
4
+ return history
5
+ .map((message) => `${message._getType()}: ${message.content}`)
6
+ .join('\n');
7
+ };
8
+
9
+ export default formatChatHistoryAsString;
src/utils/logger.ts ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import winston from 'winston';
2
+
3
+ const logger = winston.createLogger({
4
+ level: 'info',
5
+ transports: [
6
+ new winston.transports.Console({
7
+ format: winston.format.combine(
8
+ winston.format.colorize(),
9
+ winston.format.simple(),
10
+ ),
11
+ }),
12
+ new winston.transports.File({
13
+ filename: 'app.log',
14
+ format: winston.format.combine(
15
+ winston.format.timestamp(),
16
+ winston.format.json(),
17
+ ),
18
+ }),
19
+ ],
20
+ });
21
+
22
+ export default logger;
src/websocket/connectionManager.ts ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { WebSocket } from 'ws';
2
+ import { handleMessage } from './messageHandler';
3
+ import {
4
+ getAvailableEmbeddingModelProviders,
5
+ getAvailableChatModelProviders,
6
+ } from '../lib/providers';
7
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
8
+ import type { Embeddings } from '@langchain/core/embeddings';
9
+ import type { IncomingMessage } from 'http';
10
+ import logger from '../utils/logger';
11
+ import { ChatOpenAI } from '@langchain/openai';
12
+
13
+ export const handleConnection = async (
14
+ ws: WebSocket,
15
+ request: IncomingMessage,
16
+ ) => {
17
+ const searchParams = new URL(request.url, `http://${request.headers.host}`)
18
+ .searchParams;
19
+
20
+ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
21
+ getAvailableChatModelProviders(),
22
+ getAvailableEmbeddingModelProviders(),
23
+ ]);
24
+
25
+ const chatModelProvider =
26
+ searchParams.get('chatModelProvider') || Object.keys(chatModelProviders)[0];
27
+ const chatModel =
28
+ searchParams.get('chatModel') ||
29
+ Object.keys(chatModelProviders[chatModelProvider])[0];
30
+
31
+ const embeddingModelProvider =
32
+ searchParams.get('embeddingModelProvider') ||
33
+ Object.keys(embeddingModelProviders)[0];
34
+ const embeddingModel =
35
+ searchParams.get('embeddingModel') ||
36
+ Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
37
+
38
+ let llm: BaseChatModel | undefined;
39
+ let embeddings: Embeddings | undefined;
40
+
41
+ if (
42
+ chatModelProviders[chatModelProvider] &&
43
+ chatModelProviders[chatModelProvider][chatModel] &&
44
+ chatModelProvider != 'custom_openai'
45
+ ) {
46
+ llm = chatModelProviders[chatModelProvider][chatModel] as
47
+ | BaseChatModel
48
+ | undefined;
49
+ } else if (chatModelProvider == 'custom_openai') {
50
+ llm = new ChatOpenAI({
51
+ modelName: chatModel,
52
+ openAIApiKey: searchParams.get('openAIApiKey'),
53
+ temperature: 0.7,
54
+ configuration: {
55
+ baseURL: searchParams.get('openAIBaseURL'),
56
+ },
57
+ });
58
+ }
59
+
60
+ if (
61
+ embeddingModelProviders[embeddingModelProvider] &&
62
+ embeddingModelProviders[embeddingModelProvider][embeddingModel]
63
+ ) {
64
+ embeddings = embeddingModelProviders[embeddingModelProvider][
65
+ embeddingModel
66
+ ] as Embeddings | undefined;
67
+ }
68
+
69
+ if (!llm || !embeddings) {
70
+ ws.send(
71
+ JSON.stringify({
72
+ type: 'error',
73
+ data: 'Invalid LLM or embeddings model selected',
74
+ }),
75
+ );
76
+ ws.close();
77
+ }
78
+
79
+ ws.on(
80
+ 'message',
81
+ async (message) =>
82
+ await handleMessage(message.toString(), ws, llm, embeddings),
83
+ );
84
+
85
+ ws.on('close', () => logger.debug('Connection closed'));
86
+ };
src/websocket/index.ts ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import { initServer } from './websocketServer';
2
+ import http from 'http';
3
+
4
+ export const startWebSocketServer = (
5
+ server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
6
+ ) => {
7
+ initServer(server);
8
+ };
src/websocket/messageHandler.ts ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { EventEmitter, WebSocket } from 'ws';
2
+ import { BaseMessage, AIMessage, HumanMessage } from '@langchain/core/messages';
3
+ import handleWebSearch from '../agents/webSearchAgent';
4
+ import handleAcademicSearch from '../agents/academicSearchAgent';
5
+ import handleWritingAssistant from '../agents/writingAssistant';
6
+ import handleWolframAlphaSearch from '../agents/wolframAlphaSearchAgent';
7
+ import handleYoutubeSearch from '../agents/youtubeSearchAgent';
8
+ import handleRedditSearch from '../agents/redditSearchAgent';
9
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
10
+ import type { Embeddings } from '@langchain/core/embeddings';
11
+ import logger from '../utils/logger';
12
+
13
+ type Message = {
14
+ type: string;
15
+ content: string;
16
+ copilot: boolean;
17
+ focusMode: string;
18
+ history: Array<[string, string]>;
19
+ };
20
+
21
+ const searchHandlers = {
22
+ webSearch: handleWebSearch,
23
+ academicSearch: handleAcademicSearch,
24
+ writingAssistant: handleWritingAssistant,
25
+ wolframAlphaSearch: handleWolframAlphaSearch,
26
+ youtubeSearch: handleYoutubeSearch,
27
+ redditSearch: handleRedditSearch,
28
+ };
29
+
30
+ const handleEmitterEvents = (
31
+ emitter: EventEmitter,
32
+ ws: WebSocket,
33
+ id: string,
34
+ ) => {
35
+ emitter.on('data', (data) => {
36
+ const parsedData = JSON.parse(data);
37
+ if (parsedData.type === 'response') {
38
+ ws.send(
39
+ JSON.stringify({
40
+ type: 'message',
41
+ data: parsedData.data,
42
+ messageId: id,
43
+ }),
44
+ );
45
+ } else if (parsedData.type === 'sources') {
46
+ ws.send(
47
+ JSON.stringify({
48
+ type: 'sources',
49
+ data: parsedData.data,
50
+ messageId: id,
51
+ }),
52
+ );
53
+ }
54
+ });
55
+ emitter.on('end', () => {
56
+ ws.send(JSON.stringify({ type: 'messageEnd', messageId: id }));
57
+ });
58
+ emitter.on('error', (data) => {
59
+ const parsedData = JSON.parse(data);
60
+ ws.send(JSON.stringify({ type: 'error', data: parsedData.data }));
61
+ });
62
+ };
63
+
64
+ export const handleMessage = async (
65
+ message: string,
66
+ ws: WebSocket,
67
+ llm: BaseChatModel,
68
+ embeddings: Embeddings,
69
+ ) => {
70
+ try {
71
+ const parsedMessage = JSON.parse(message) as Message;
72
+ const id = Math.random().toString(36).substring(7);
73
+
74
+ if (!parsedMessage.content)
75
+ return ws.send(
76
+ JSON.stringify({ type: 'error', data: 'Invalid message format' }),
77
+ );
78
+
79
+ const history: BaseMessage[] = parsedMessage.history.map((msg) => {
80
+ if (msg[0] === 'human') {
81
+ return new HumanMessage({
82
+ content: msg[1],
83
+ });
84
+ } else {
85
+ return new AIMessage({
86
+ content: msg[1],
87
+ });
88
+ }
89
+ });
90
+
91
+ if (parsedMessage.type === 'message') {
92
+ const handler = searchHandlers[parsedMessage.focusMode];
93
+ if (handler) {
94
+ const emitter = handler(
95
+ parsedMessage.content,
96
+ history,
97
+ llm,
98
+ embeddings,
99
+ );
100
+ handleEmitterEvents(emitter, ws, id);
101
+ } else {
102
+ ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
103
+ }
104
+ }
105
+ } catch (err) {
106
+ ws.send(JSON.stringify({ type: 'error', data: 'Invalid message format' }));
107
+ logger.error(`Failed to handle message: ${err}`);
108
+ }
109
+ };
src/websocket/websocketServer.ts ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { WebSocketServer } from 'ws';
2
+ import { handleConnection } from './connectionManager';
3
+ import http from 'http';
4
+ import { getPort } from '../config';
5
+ import logger from '../utils/logger';
6
+
7
+ export const initServer = (
8
+ server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
9
+ ) => {
10
+ const port = getPort();
11
+ const wss = new WebSocketServer({ server });
12
+
13
+ wss.on('connection', handleConnection);
14
+
15
+ logger.info(`WebSocket server started on port ${port}`);
16
+ };
ui/.env.example ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ NEXT_PUBLIC_WS_URL=ws://localhost:3001
2
+ NEXT_PUBLIC_API_URL=http://localhost:3001/api
ui/.eslintrc.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "extends": "next/core-web-vitals"
3
+ }
ui/.gitignore ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dependencies
2
+ /node_modules
3
+ /.pnp
4
+ .pnp.js
5
+ .yarn/install-state.gz
6
+
7
+ # testing
8
+ /coverage
9
+
10
+ # next.js
11
+ /.next/
12
+ /out/
13
+
14
+ # production
15
+ /build
16
+
17
+ # misc
18
+ .DS_Store
19
+ *.pem
20
+
21
+ # debug
22
+ npm-debug.log*
23
+ yarn-debug.log*
24
+ yarn-error.log*
25
+
26
+ # local env files
27
+ .env*.local
28
+
29
+ # vercel
30
+ .vercel
31
+
32
+ # typescript
33
+ *.tsbuildinfo
34
+ next-env.d.ts
ui/.prettierrc.js ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /** @type {import("prettier").Config} */
2
+
3
+ const config = {
4
+ printWidth: 80,
5
+ trailingComma: 'all',
6
+ endOfLine: 'auto',
7
+ singleQuote: true,
8
+ tabWidth: 2,
9
+ };
10
+
11
+ module.exports = config;
ui/app/discover/page.tsx ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ const Page = () => {
2
+ return <div>page</div>;
3
+ };
4
+
5
+ export default Page;
ui/app/favicon.ico ADDED
ui/app/globals.css ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @tailwind base;
2
+ @tailwind components;
3
+ @tailwind utilities;
4
+
5
+ @layer base {
6
+ .overflow-hidden-scrollable {
7
+ -ms-overflow-style: none;
8
+ }
9
+
10
+ .overflow-hidden-scrollable::-webkit-scrollbar {
11
+ display: none;
12
+ }
13
+ }
ui/app/layout.tsx ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type { Metadata } from 'next';
2
+ import { Montserrat } from 'next/font/google';
3
+ import './globals.css';
4
+ import { cn } from '@/lib/utils';
5
+ import Sidebar from '@/components/Sidebar';
6
+ import { Toaster } from 'sonner';
7
+
8
+ const montserrat = Montserrat({
9
+ weight: ['300', '400', '500', '700'],
10
+ subsets: ['latin'],
11
+ display: 'swap',
12
+ fallback: ['Arial', 'sans-serif'],
13
+ });
14
+
15
+ export const metadata: Metadata = {
16
+ title: 'Perplexica - Chat with the internet',
17
+ description:
18
+ 'Perplexica is an AI powered chatbot that is connected to the internet.',
19
+ };
20
+
21
+ export default function RootLayout({
22
+ children,
23
+ }: Readonly<{
24
+ children: React.ReactNode;
25
+ }>) {
26
+ return (
27
+ <html className="h-full" lang="en">
28
+ <body className={cn('h-full', montserrat.className)}>
29
+ <Sidebar>{children}</Sidebar>
30
+ <Toaster
31
+ toastOptions={{
32
+ unstyled: true,
33
+ classNames: {
34
+ toast:
35
+ 'bg-[#111111] text-white rounded-lg p-4 flex flex-row items-center space-x-2',
36
+ },
37
+ }}
38
+ />
39
+ </body>
40
+ </html>
41
+ );
42
+ }
ui/app/page.tsx ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ChatWindow from '@/components/ChatWindow';
2
+ import { Metadata } from 'next';
3
+
4
+ export const metadata: Metadata = {
5
+ title: 'Chat - Perplexica',
6
+ description: 'Chat with the internet, chat with Perplexica.',
7
+ };
8
+
9
+ const Home = () => {
10
+ return (
11
+ <div>
12
+ <ChatWindow />
13
+ </div>
14
+ );
15
+ };
16
+
17
+ export default Home;
ui/components/Chat.tsx ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 'use client';
2
+
3
+ import { useEffect, useRef, useState } from 'react';
4
+ import MessageInput from './MessageInput';
5
+ import { Message } from './ChatWindow';
6
+ import MessageBox from './MessageBox';
7
+ import MessageBoxLoading from './MessageBoxLoading';
8
+
9
+ const Chat = ({
10
+ loading,
11
+ messages,
12
+ sendMessage,
13
+ messageAppeared,
14
+ rewrite,
15
+ }: {
16
+ messages: Message[];
17
+ sendMessage: (message: string) => void;
18
+ loading: boolean;
19
+ messageAppeared: boolean;
20
+ rewrite: (messageId: string) => void;
21
+ }) => {
22
+ const [dividerWidth, setDividerWidth] = useState(0);
23
+ const dividerRef = useRef<HTMLDivElement | null>(null);
24
+ const messageEnd = useRef<HTMLDivElement | null>(null);
25
+
26
+ useEffect(() => {
27
+ const updateDividerWidth = () => {
28
+ if (dividerRef.current) {
29
+ setDividerWidth(dividerRef.current.scrollWidth);
30
+ }
31
+ };
32
+
33
+ updateDividerWidth();
34
+
35
+ window.addEventListener('resize', updateDividerWidth);
36
+
37
+ return () => {
38
+ window.removeEventListener('resize', updateDividerWidth);
39
+ };
40
+ });
41
+
42
+ useEffect(() => {
43
+ messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
44
+
45
+ if (messages.length === 1) {
46
+ document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
47
+ }
48
+ }, [messages]);
49
+
50
+ return (
51
+ <div className="flex flex-col space-y-6 pt-8 pb-44 lg:pb-32 sm:mx-4 md:mx-8">
52
+ {messages.map((msg, i) => {
53
+ const isLast = i === messages.length - 1;
54
+
55
+ return (
56
+ <>
57
+ <MessageBox
58
+ key={i}
59
+ message={msg}
60
+ messageIndex={i}
61
+ history={messages}
62
+ loading={loading}
63
+ dividerRef={isLast ? dividerRef : undefined}
64
+ isLast={isLast}
65
+ rewrite={rewrite}
66
+ />
67
+ {!isLast && msg.role === 'assistant' && (
68
+ <div className="h-px w-full bg-[#1C1C1C]" />
69
+ )}
70
+ </>
71
+ );
72
+ })}
73
+ {loading && !messageAppeared && <MessageBoxLoading />}
74
+ <div ref={messageEnd} className="h-0" />
75
+ {dividerWidth > 0 && (
76
+ <div
77
+ className="bottom-24 lg:bottom-10 fixed z-40"
78
+ style={{ width: dividerWidth }}
79
+ >
80
+ <MessageInput loading={loading} sendMessage={sendMessage} />
81
+ </div>
82
+ )}
83
+ </div>
84
+ );
85
+ };
86
+
87
+ export default Chat;
ui/components/ChatWindow.tsx ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
'use client';

import { useEffect, useState } from 'react';
import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import { toast } from 'sonner';

// Shape of a single chat message as rendered by the UI.
// `sources` is only present on assistant messages that cite documents.
export type Message = {
  id: string;
  createdAt: Date;
  content: string;
  role: 'user' | 'assistant';
  sources?: Document[];
};

// Opens (and lazily configures) the backend WebSocket connection.
// Model/provider choices are persisted in localStorage; if any are missing,
// defaults are fetched from the `/models` endpoint before connecting.
// Returns null until the socket's `onopen` fires.
const useSocket = (url: string) => {
  const [ws, setWs] = useState<WebSocket | null>(null);

  useEffect(() => {
    if (!ws) {
      const connectWs = async () => {
        let chatModel = localStorage.getItem('chatModel');
        let chatModelProvider = localStorage.getItem('chatModelProvider');
        let embeddingModel = localStorage.getItem('embeddingModel');
        let embeddingModelProvider = localStorage.getItem(
          'embeddingModelProvider',
        );

        // No stored selection yet: ask the API which providers/models exist
        // and pick the first of each as the default.
        if (
          !chatModel ||
          !chatModelProvider ||
          !embeddingModel ||
          !embeddingModelProvider
        ) {
          const providers = await fetch(
            `${process.env.NEXT_PUBLIC_API_URL}/models`,
            {
              headers: {
                'Content-Type': 'application/json',
              },
            },
          ).then(async (res) => await res.json());

          const chatModelProviders = providers.chatModelProviders;
          const embeddingModelProviders = providers.embeddingModelProviders;

          // Bail out (no connection attempt) when the backend reports no
          // usable models; the hook then stays in its null/loading state.
          if (
            !chatModelProviders ||
            Object.keys(chatModelProviders).length === 0
          )
            return console.error('No chat models available');

          if (
            !embeddingModelProviders ||
            Object.keys(embeddingModelProviders).length === 0
          )
            return console.error('No embedding models available');

          chatModelProvider = Object.keys(chatModelProviders)[0];
          chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];

          embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
          embeddingModel = Object.keys(
            embeddingModelProviders[embeddingModelProvider],
          )[0];

          // Persist the defaults so subsequent loads skip the fetch.
          localStorage.setItem('chatModel', chatModel!);
          localStorage.setItem('chatModelProvider', chatModelProvider);
          localStorage.setItem('embeddingModel', embeddingModel!);
          localStorage.setItem(
            'embeddingModelProvider',
            embeddingModelProvider,
          );
        }

        // Model selection is passed to the server via query parameters.
        const wsURL = new URL(url);
        const searchParams = new URLSearchParams({});

        searchParams.append('chatModel', chatModel!);
        searchParams.append('chatModelProvider', chatModelProvider);

        // Custom OpenAI-compatible endpoints need credentials forwarded.
        // NOTE(review): this puts the API key in the WS URL query string —
        // confirm this is acceptable for the deployment's logging setup.
        if (chatModelProvider === 'custom_openai') {
          searchParams.append(
            'openAIApiKey',
            localStorage.getItem('openAIApiKey')!,
          );
          searchParams.append(
            'openAIBaseURL',
            localStorage.getItem('openAIBaseURL')!,
          );
        }

        searchParams.append('embeddingModel', embeddingModel!);
        searchParams.append('embeddingModelProvider', embeddingModelProvider);

        wsURL.search = searchParams.toString();

        const ws = new WebSocket(wsURL.toString());

        // Only publish the socket to state once it is actually open,
        // so consumers never see a CONNECTING socket.
        ws.onopen = () => {
          console.log('[DEBUG] open');
          setWs(ws);
        };

        // Connection-level error surface; per-message handling lives in
        // the `sendMessage` listener below.
        ws.onmessage = (e) => {
          const parsedData = JSON.parse(e.data);
          if (parsedData.type === 'error') {
            toast.error(parsedData.data);
          }
        };
      };

      connectWs();
    }

    // NOTE(review): cleanup closes the socket whenever deps change or on
    // unmount; because `ws` is a dep, setWs() re-runs this effect — the
    // `if (!ws)` guard prevents a reconnect loop, but the pattern is fragile.
    return () => {
      ws?.close();
      console.log('[DEBUG] closed');
    };
  }, [ws, url]);

  return ws;
};

// Top-level chat screen: owns message list, streaming state, and the
// [human, assistant] history pairs sent to the backend with each request.
const ChatWindow = () => {
  const ws = useSocket(process.env.NEXT_PUBLIC_WS_URL!);
  const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
  const [messages, setMessages] = useState<Message[]>([]);
  const [loading, setLoading] = useState(false);
  // True once any part of the assistant reply (sources or text) arrived;
  // used to hide the loading skeleton.
  const [messageAppeared, setMessageAppeared] = useState(false);
  const [focusMode, setFocusMode] = useState('webSearch');

  // Sends one user message and installs a temporary WS listener that
  // accumulates the streamed reply into a single assistant message.
  const sendMessage = async (message: string) => {
    if (loading) return;
    setLoading(true);
    setMessageAppeared(false);

    let sources: Document[] | undefined = undefined;
    let recievedMessage = '';
    // Ensures the assistant placeholder message is appended exactly once,
    // whether 'sources' or the first 'message' chunk arrives first.
    let added = false;

    ws?.send(
      JSON.stringify({
        type: 'message',
        content: message,
        focusMode: focusMode,
        history: [...chatHistory, ['human', message]],
      }),
    );

    setMessages((prevMessages) => [
      ...prevMessages,
      {
        content: message,
        id: Math.random().toString(36).substring(7),
        role: 'user',
        createdAt: new Date(),
      },
    ]);

    const messageHandler = (e: MessageEvent) => {
      const data = JSON.parse(e.data);

      if (data.type === 'error') {
        toast.error(data.data);
        setLoading(false);
        return;
      }

      // Sources may arrive before any answer text; create the assistant
      // message shell so the sources panel renders immediately.
      if (data.type === 'sources') {
        sources = data.data;
        if (!added) {
          setMessages((prevMessages) => [
            ...prevMessages,
            {
              content: '',
              id: data.messageId,
              role: 'assistant',
              sources: sources,
              createdAt: new Date(),
            },
          ]);
          added = true;
        }
        setMessageAppeared(true);
      }

      // Streamed answer chunk: append to the assistant message matched by id.
      if (data.type === 'message') {
        if (!added) {
          setMessages((prevMessages) => [
            ...prevMessages,
            {
              content: data.data,
              id: data.messageId,
              role: 'assistant',
              sources: sources,
              createdAt: new Date(),
            },
          ]);
          added = true;
        }

        setMessages((prev) =>
          prev.map((message) => {
            if (message.id === data.messageId) {
              return { ...message, content: message.content + data.data };
            }

            return message;
          }),
        );

        recievedMessage += data.data;
        setMessageAppeared(true);
      }

      // End of stream: commit the exchange to history and detach this
      // per-request listener so it doesn't see the next reply.
      if (data.type === 'messageEnd') {
        setChatHistory((prevHistory) => [
          ...prevHistory,
          ['human', message],
          ['assistant', recievedMessage],
        ]);
        ws?.removeEventListener('message', messageHandler);
        setLoading(false);
      }
    };

    ws?.addEventListener('message', messageHandler);
  };

  // Re-asks the user question that precedes the given assistant message,
  // truncating messages/history back to that point first.
  const rewrite = (messageId: string) => {
    const index = messages.findIndex((msg) => msg.id === messageId);

    if (index === -1) return;

    // The user prompt immediately before the assistant reply being redone.
    const message = messages[index - 1];

    setMessages((prev) => {
      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
    });
    setChatHistory((prev) => {
      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
    });

    sendMessage(message.content);
  };

  return ws ? (
    <div>
      {messages.length > 0 ? (
        <>
          <Navbar messages={messages} />
          <Chat
            loading={loading}
            messages={messages}
            sendMessage={sendMessage}
            messageAppeared={messageAppeared}
            rewrite={rewrite}
          />
        </>
      ) : (
        <EmptyChat
          sendMessage={sendMessage}
          focusMode={focusMode}
          setFocusMode={setFocusMode}
        />
      )}
    </div>
  ) : (
    // Spinner shown until the WebSocket connection is established.
    <div className="flex flex-row items-center justify-center min-h-screen">
      <svg
        aria-hidden="true"
        className="w-8 h-8 text-[#202020] animate-spin fill-[#ffffff3b]"
        viewBox="0 0 100 101"
        fill="none"
        xmlns="http://www.w3.org/2000/svg"
      >
        <path
          d="M100 50.5908C100 78.2051 77.6142 100.591 50 100.591C22.3858 100.591 0 78.2051 0 50.5908C0 22.9766 22.3858 0.59082 50 0.59082C77.6142 0.59082 100 22.9766 100 50.5908ZM9.08144 50.5908C9.08144 73.1895 27.4013 91.5094 50 91.5094C72.5987 91.5094 90.9186 73.1895 90.9186 50.5908C90.9186 27.9921 72.5987 9.67226 50 9.67226C27.4013 9.67226 9.08144 27.9921 9.08144 50.5908Z"
          fill="currentColor"
        />
        <path
          d="M93.9676 39.0409C96.393 38.4038 97.8624 35.9116 97.0079 33.5539C95.2932 28.8227 92.871 24.3692 89.8167 20.348C85.8452 15.1192 80.8826 10.7238 75.2124 7.41289C69.5422 4.10194 63.2754 1.94025 56.7698 1.05124C51.7666 0.367541 46.6976 0.446843 41.7345 1.27873C39.2613 1.69328 37.813 4.19778 38.4501 6.62326C39.0873 9.04874 41.5694 10.4717 44.0505 10.1071C47.8511 9.54855 51.7191 9.52689 55.5402 10.0491C60.8642 10.7766 65.9928 12.5457 70.6331 15.2552C75.2735 17.9648 79.3347 21.5619 82.5849 25.841C84.9175 28.9121 86.7997 32.2913 88.1811 35.8758C89.083 38.2158 91.5421 39.6781 93.9676 39.0409Z"
          fill="currentFill"
        />
      </svg>
    </div>
  );
};

export default ChatWindow;
ui/components/EmptyChat.tsx ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import EmptyChatMessageInput from './EmptyChatMessageInput';
2
+
3
+ const EmptyChat = ({
4
+ sendMessage,
5
+ focusMode,
6
+ setFocusMode,
7
+ }: {
8
+ sendMessage: (message: string) => void;
9
+ focusMode: string;
10
+ setFocusMode: (mode: string) => void;
11
+ }) => {
12
+ return (
13
+ <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
14
+ <h2 className="text-white/70 text-3xl font-medium -mt-8">
15
+ Research begins here.
16
+ </h2>
17
+ <EmptyChatMessageInput
18
+ sendMessage={sendMessage}
19
+ focusMode={focusMode}
20
+ setFocusMode={setFocusMode}
21
+ />
22
+ </div>
23
+ );
24
+ };
25
+
26
+ export default EmptyChat;
ui/components/EmptyChatMessageInput.tsx ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { ArrowRight } from 'lucide-react';
import { useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import { Attach, CopilotToggle, Focus } from './MessageInputActions';

/**
 * Message composer for the empty-chat landing view.
 *
 * Submits via the form's submit button or plain Enter (Shift+Enter inserts a
 * newline). Fix: both submission paths now refuse empty/whitespace-only
 * messages — previously the send button was disabled for empty input, but
 * pressing Enter bypassed that guard and sent an empty message.
 */
const EmptyChatMessageInput = ({
  sendMessage,
  focusMode,
  setFocusMode,
}: {
  sendMessage: (message: string) => void;
  focusMode: string;
  setFocusMode: (mode: string) => void;
}) => {
  const [copilotEnabled, setCopilotEnabled] = useState(false);
  const [message, setMessage] = useState('');

  // Shared submit path: send only non-empty input, then clear the box.
  const submit = () => {
    if (message.trim().length === 0) return;
    sendMessage(message);
    setMessage('');
  };

  return (
    <form
      onSubmit={(e) => {
        e.preventDefault();
        submit();
      }}
      onKeyDown={(e) => {
        // Enter submits; Shift+Enter falls through to insert a newline.
        if (e.key === 'Enter' && !e.shiftKey) {
          e.preventDefault();
          submit();
        }
      }}
      className="w-full"
    >
      <div className="flex flex-col bg-[#111111] px-5 pt-5 pb-2 rounded-lg w-full border border-[#1C1C1C]">
        <TextareaAutosize
          value={message}
          onChange={(e) => setMessage(e.target.value)}
          minRows={2}
          className="bg-transparent placeholder:text-white/50 text-sm text-white resize-none focus:outline-none w-full max-h-24 lg:max-h-36 xl:max-h-48"
          placeholder="Ask anything..."
        />
        <div className="flex flex-row items-center justify-between mt-4">
          <div className="flex flex-row items-center space-x-1 -mx-2">
            <Focus focusMode={focusMode} setFocusMode={setFocusMode} />
            {/* <Attach /> */}
          </div>
          <div className="flex flex-row items-center space-x-4 -mx-2">
            <CopilotToggle
              copilotEnabled={copilotEnabled}
              setCopilotEnabled={setCopilotEnabled}
            />
            <button
              disabled={message.trim().length === 0}
              className="bg-[#24A0ED] text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full p-2"
            >
              <ArrowRight className="bg-background" size={17} />
            </button>
          </div>
        </div>
      </div>
    </form>
  );
};

export default EmptyChatMessageInput;
ui/components/Layout.tsx ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ const Layout = ({ children }: { children: React.ReactNode }) => {
2
+ return (
3
+ <main className="lg:pl-20 bg-[#0A0A0A] min-h-screen">
4
+ <div className="max-w-screen-lg lg:mx-auto mx-4">{children}</div>
5
+ </main>
6
+ );
7
+ };
8
+
9
+ export default Layout;
ui/components/MessageActions/Copy.tsx ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Check, ClipboardList } from 'lucide-react';
import { Message } from '../ChatWindow';
import { useState } from 'react';

/**
 * Copy-to-clipboard action for an assistant message. Copies the raw message
 * text plus a numbered "Citations" list when sources are present, and briefly
 * swaps the icon to a checkmark as feedback.
 *
 * Fix: the original interpolated `message.sources && sources.length > 0 && `…``
 * directly into a template literal, so messages without sources copied the
 * literal text "false"/"undefined" appended to the content. Citations are now
 * built separately and default to the empty string.
 */
const Copy = ({
  message,
  initialMessage,
}: {
  message: Message;
  initialMessage: string;
}) => {
  const [copied, setCopied] = useState(false);

  return (
    <button
      onClick={() => {
        // Only append the citations section when sources actually exist.
        const citations =
          message.sources && message.sources.length > 0
            ? `\n\nCitations:\n${message.sources
                .map((source: any, i: number) => `[${i + 1}] ${source.metadata.url}`)
                .join(`\n`)}`
            : '';
        navigator.clipboard.writeText(`${initialMessage}${citations}`);
        setCopied(true);
        // Revert the checkmark after a short confirmation window.
        setTimeout(() => setCopied(false), 1000);
      }}
      className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
    >
      {copied ? <Check size={18} /> : <ClipboardList size={18} />}
    </button>
  );
};

export default Copy;
ui/components/MessageActions/Rewrite.tsx ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { ArrowLeftRight } from 'lucide-react';

// Props for the rewrite action button.
interface RewriteProps {
  rewrite: (messageId: string) => void;
  messageId: string;
}

/**
 * Icon button that re-runs the generation for the given assistant message by
 * delegating to the parent-supplied `rewrite` callback.
 */
const Rewrite = ({ rewrite, messageId }: RewriteProps) => (
  <button
    onClick={() => rewrite(messageId)}
    className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
  >
    <ArrowLeftRight size={18} />
  </button>
);

export default Rewrite;
ui/components/MessageBox.tsx ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 'use client';
2
+
3
+ /* eslint-disable @next/next/no-img-element */
4
+ import React, { MutableRefObject, useEffect, useState } from 'react';
5
+ import { Message } from './ChatWindow';
6
+ import { cn } from '@/lib/utils';
7
+ import { BookCopy, Disc3, Share, Volume2, StopCircle } from 'lucide-react';
8
+ import Markdown from 'markdown-to-jsx';
9
+ import Copy from './MessageActions/Copy';
10
+ import Rewrite from './MessageActions/Rewrite';
11
+ import MessageSources from './MessageSources';
12
+ import SearchImages from './SearchImages';
13
+ import SearchVideos from './SearchVideos';
14
+ import { useSpeech } from 'react-text-to-speech';
15
+
16
+ const MessageBox = ({
17
+ message,
18
+ messageIndex,
19
+ history,
20
+ loading,
21
+ dividerRef,
22
+ isLast,
23
+ rewrite,
24
+ }: {
25
+ message: Message;
26
+ messageIndex: number;
27
+ history: Message[];
28
+ loading: boolean;
29
+ dividerRef?: MutableRefObject<HTMLDivElement | null>;
30
+ isLast: boolean;
31
+ rewrite: (messageId: string) => void;
32
+ }) => {
33
+ const [parsedMessage, setParsedMessage] = useState(message.content);
34
+ const [speechMessage, setSpeechMessage] = useState(message.content);
35
+
36
+ useEffect(() => {
37
+ const regex = /\[(\d+)\]/g;
38
+
39
+ if (
40
+ message.role === 'assistant' &&
41
+ message?.sources &&
42
+ message.sources.length > 0
43
+ ) {
44
+ return setParsedMessage(
45
+ message.content.replace(
46
+ regex,
47
+ (_, number) =>
48
+ `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-[#1C1C1C] px-1 rounded ml-1 no-underline text-xs text-white/70 relative">${number}</a>`,
49
+ ),
50
+ );
51
+ }
52
+
53
+ setSpeechMessage(message.content.replace(regex, ''));
54
+ setParsedMessage(message.content);
55
+ }, [message.content, message.sources, message.role]);
56
+
57
+ const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
58
+
59
+ return (
60
+ <div>
61
+ {message.role === 'user' && (
62
+ <div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8')}>
63
+ <h2 className="text-white font-medium text-3xl lg:w-9/12">
64
+ {message.content}
65
+ </h2>
66
+ </div>
67
+ )}
68
+
69
+ {message.role === 'assistant' && (
70
+ <div className="flex flex-col space-y-9 lg:space-y-0 lg:flex-row lg:justify-between lg:space-x-9">
71
+ <div
72
+ ref={dividerRef}
73
+ className="flex flex-col space-y-6 w-full lg:w-9/12"
74
+ >
75
+ {message.sources && message.sources.length > 0 && (
76
+ <div className="flex flex-col space-y-2">
77
+ <div className="flex flex-row items-center space-x-2">
78
+ <BookCopy className="text-white" size={20} />
79
+ <h3 className="text-white font-medium text-xl">Sources</h3>
80
+ </div>
81
+ <MessageSources sources={message.sources} />
82
+ </div>
83
+ )}
84
+ <div className="flex flex-col space-y-2">
85
+ <div className="flex flex-row items-center space-x-2">
86
+ <Disc3
87
+ className={cn(
88
+ 'text-white',
89
+ isLast && loading ? 'animate-spin' : 'animate-none',
90
+ )}
91
+ size={20}
92
+ />
93
+ <h3 className="text-white font-medium text-xl">Answer</h3>
94
+ </div>
95
+ <Markdown className="prose max-w-none break-words prose-invert prose-p:leading-relaxed prose-pre:p-0 text-white text-sm md:text-base font-medium">
96
+ {parsedMessage}
97
+ </Markdown>
98
+ {loading && isLast ? null : (
99
+ <div className="flex flex-row items-center justify-between w-full text-white py-4 -mx-2">
100
+ <div className="flex flex-row items-center space-x-1">
101
+ <button className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white">
102
+ <Share size={18} />
103
+ </button>
104
+ <Rewrite rewrite={rewrite} messageId={message.id} />
105
+ </div>
106
+ <div className="flex flex-row items-center space-x-1">
107
+ <Copy initialMessage={message.content} message={message} />
108
+ <button
109
+ onClick={() => {
110
+ if (speechStatus === 'started') {
111
+ stop();
112
+ } else {
113
+ start();
114
+ }
115
+ }}
116
+ className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
117
+ >
118
+ {speechStatus === 'started' ? (
119
+ <StopCircle size={18} />
120
+ ) : (
121
+ <Volume2 size={18} />
122
+ )}
123
+ </button>
124
+ </div>
125
+ </div>
126
+ )}
127
+ </div>
128
+ </div>
129
+ <div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
130
+ <SearchImages
131
+ query={history[messageIndex - 1].content}
132
+ chat_history={history.slice(0, messageIndex - 1)}
133
+ />
134
+ <SearchVideos
135
+ chat_history={history.slice(0, messageIndex - 1)}
136
+ query={history[messageIndex - 1].content}
137
+ />
138
+ </div>
139
+ </div>
140
+ )}
141
+ </div>
142
+ );
143
+ };
144
+
145
+ export default MessageBox;
ui/components/MessageBoxLoading.tsx ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const MessageBoxLoading = () => {
2
+ return (
3
+ <div className="flex flex-col space-y-2 w-full lg:w-9/12 bg-[#111111] animate-pulse rounded-lg p-3">
4
+ <div className="h-2 rounded-full w-full bg-[#1c1c1c]" />
5
+ <div className="h-2 rounded-full w-9/12 bg-[#1c1c1c]" />
6
+ <div className="h-2 rounded-full w-10/12 bg-[#1c1c1c]" />
7
+ </div>
8
+ );
9
+ };
10
+
11
+ export default MessageBoxLoading;
ui/components/MessageInput.tsx ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { cn } from '@/lib/utils';
2
+ import { ArrowUp } from 'lucide-react';
3
+ import { useEffect, useState } from 'react';
4
+ import TextareaAutosize from 'react-textarea-autosize';
5
+ import { Attach, CopilotToggle } from './MessageInputActions';
6
+
7
+ const MessageInput = ({
8
+ sendMessage,
9
+ loading,
10
+ }: {
11
+ sendMessage: (message: string) => void;
12
+ loading: boolean;
13
+ }) => {
14
+ const [copilotEnabled, setCopilotEnabled] = useState(false);
15
+ const [message, setMessage] = useState('');
16
+ const [textareaRows, setTextareaRows] = useState(1);
17
+ const [mode, setMode] = useState<'multi' | 'single'>('single');
18
+
19
+ useEffect(() => {
20
+ if (textareaRows >= 2 && message && mode === 'single') {
21
+ setMode('multi');
22
+ } else if (!message && mode === 'multi') {
23
+ setMode('single');
24
+ }
25
+ }, [textareaRows, mode, message]);
26
+
27
+ return (
28
+ <form
29
+ onSubmit={(e) => {
30
+ if (loading) return;
31
+ e.preventDefault();
32
+ sendMessage(message);
33
+ setMessage('');
34
+ }}
35
+ onKeyDown={(e) => {
36
+ if (e.key === 'Enter' && !e.shiftKey && !loading) {
37
+ e.preventDefault();
38
+ sendMessage(message);
39
+ setMessage('');
40
+ }
41
+ }}
42
+ className={cn(
43
+ 'bg-[#111111] p-4 flex items-center overflow-hidden border border-[#1C1C1C]',
44
+ mode === 'multi' ? 'flex-col rounded-lg' : 'flex-row rounded-full',
45
+ )}
46
+ >
47
+ {mode === 'single' && <Attach />}
48
+ <TextareaAutosize
49
+ value={message}
50
+ onChange={(e) => setMessage(e.target.value)}
51
+ onHeightChange={(height, props) => {
52
+ setTextareaRows(Math.ceil(height / props.rowHeight));
53
+ }}
54
+ className="transition bg-transparent placeholder:text-white/50 placeholder:text-sm text-sm text-white resize-none focus:outline-none w-full px-2 max-h-24 lg:max-h-36 xl:max-h-48 flex-grow flex-shrink"
55
+ placeholder="Ask a follow-up"
56
+ />
57
+ {mode === 'single' && (
58
+ <div className="flex flex-row items-center space-x-4">
59
+ <CopilotToggle
60
+ copilotEnabled={copilotEnabled}
61
+ setCopilotEnabled={setCopilotEnabled}
62
+ />
63
+ <button
64
+ disabled={message.trim().length === 0 || loading}
65
+ className="bg-[#24A0ED] text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full p-2"
66
+ >
67
+ <ArrowUp className="bg-background" size={17} />
68
+ </button>
69
+ </div>
70
+ )}
71
+ {mode === 'multi' && (
72
+ <div className="flex flex-row items-center justify-between w-full pt-2">
73
+ <Attach />
74
+ <div className="flex flex-row items-center space-x-4">
75
+ <CopilotToggle
76
+ copilotEnabled={copilotEnabled}
77
+ setCopilotEnabled={setCopilotEnabled}
78
+ />
79
+ <button
80
+ disabled={message.trim().length === 0 || loading}
81
+ className="bg-[#24A0ED] text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full p-2"
82
+ >
83
+ <ArrowUp className="bg-background" size={17} />
84
+ </button>
85
+ </div>
86
+ </div>
87
+ )}
88
+ </form>
89
+ );
90
+ };
91
+
92
+ export default MessageInput;
ui/components/MessageInputActions.tsx ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import {
  BadgePercent,
  ChevronDown,
  CopyPlus,
  Globe,
  Pencil,
  ScanEye,
  SwatchBook,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import { Popover, Switch, Transition } from '@headlessui/react';
import { SiReddit, SiYoutube } from '@icons-pack/react-simple-icons';
import { Fragment } from 'react';

// Attachment button placeholder — renders the icon only; no click handler
// is wired up yet.
export const Attach = () => {
  return (
    <button
      type="button"
      className="p-2 text-white/50 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
    >
      <CopyPlus />
    </button>
  );
};

// Available search focus modes. `key` is the identifier sent to the backend
// (via ChatWindow's focusMode state); title/description/icon drive the picker UI.
const focusModes = [
  {
    key: 'webSearch',
    title: 'All',
    description: 'Searches across all of the internet',
    icon: <Globe size={20} />,
  },
  {
    key: 'academicSearch',
    title: 'Academic',
    description: 'Search in published academic papers',
    icon: <SwatchBook size={20} />,
  },
  {
    key: 'writingAssistant',
    title: 'Writing',
    description: 'Chat without searching the web',
    icon: <Pencil size={16} />,
  },
  {
    key: 'wolframAlphaSearch',
    title: 'Wolfram Alpha',
    description: 'Computational knowledge engine',
    icon: <BadgePercent size={20} />,
  },
  {
    key: 'youtubeSearch',
    title: 'Youtube',
    description: 'Search and watch videos',
    icon: (
      <SiYoutube
        className="h-5 w-auto mr-0.5"
        onPointerEnterCapture={undefined}
        onPointerLeaveCapture={undefined}
      />
    ),
  },
  {
    key: 'redditSearch',
    title: 'Reddit',
    description: 'Search for discussions and opinions',
    icon: (
      <SiReddit
        className="h-5 w-auto mr-0.5"
        onPointerEnterCapture={undefined}
        onPointerLeaveCapture={undefined}
      />
    ),
  },
];

// Focus-mode picker: a Headless UI popover listing all focus modes.
// The trigger shows the selected mode's icon+title, or a plain "scan eye"
// icon when the default 'webSearch' mode is active.
export const Focus = ({
  focusMode,
  setFocusMode,
}: {
  focusMode: string;
  setFocusMode: (mode: string) => void;
}) => {
  return (
    <Popover className="fixed w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
      <Popover.Button
        type="button"
        className="p-2 text-white/50 rounded-xl hover:bg-[#1c1c1c] active:scale-95 transition duration-200 hover:text-white"
      >
        {focusMode !== 'webSearch' ? (
          <div className="flex flex-row items-center space-x-1">
            {focusModes.find((mode) => mode.key === focusMode)?.icon}
            <p className="text-xs font-medium">
              {focusModes.find((mode) => mode.key === focusMode)?.title}
            </p>
            <ChevronDown size={20} />
          </div>
        ) : (
          <ScanEye />
        )}
      </Popover.Button>
      {/* Fade/slide animation for the popover panel. */}
      <Transition
        as={Fragment}
        enter="transition ease-out duration-150"
        enterFrom="opacity-0 translate-y-1"
        enterTo="opacity-100 translate-y-0"
        leave="transition ease-in duration-150"
        leaveFrom="opacity-100 translate-y-0"
        leaveTo="opacity-0 translate-y-1"
      >
        <Popover.Panel className="absolute z-10 w-full">
          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-1 bg-[#0A0A0A] border rounded-lg border-[#1c1c1c] w-full p-2 max-h-[200px] md:max-h-none overflow-y-auto">
            {focusModes.map((mode, i) => (
              // Using Popover.Button per item closes the popover on select.
              <Popover.Button
                onClick={() => setFocusMode(mode.key)}
                key={i}
                className={cn(
                  'p-2 rounded-lg flex flex-col items-start justify-start text-start space-y-2 duration-200 cursor-pointer transition',
                  focusMode === mode.key
                    ? 'bg-[#111111]'
                    : 'hover:bg-[#111111]',
                )}
              >
                <div
                  className={cn(
                    'flex flex-row items-center space-x-1',
                    focusMode === mode.key ? 'text-[#24A0ED]' : 'text-white',
                  )}
                >
                  {mode.icon}
                  <p className="text-sm font-medium">{mode.title}</p>
                </div>
                <p className="text-white/70 text-xs">{mode.description}</p>
              </Popover.Button>
            ))}
          </div>
        </Popover.Panel>
      </Transition>
    </Popover>
  );
};

// Copilot on/off switch plus a clickable label; state is controlled by the
// parent via the `copilotEnabled`/`setCopilotEnabled` pair.
export const CopilotToggle = ({
  copilotEnabled,
  setCopilotEnabled,
}: {
  copilotEnabled: boolean;
  setCopilotEnabled: (enabled: boolean) => void;
}) => {
  return (
    <div className="group flex flex-row items-center space-x-1 active:scale-95 duration-200 transition cursor-pointer">
      <Switch
        checked={copilotEnabled}
        onChange={setCopilotEnabled}
        className="bg-[#111111] border border-[#1C1C1C] relative inline-flex h-5 w-10 sm:h-6 sm:w-11 items-center rounded-full"
      >
        <span className="sr-only">Copilot</span>
        <span
          className={cn(
            copilotEnabled
              ? 'translate-x-6 bg-[#24A0ED]'
              : 'translate-x-1 bg-white/50',
            'inline-block h-3 w-3 sm:h-4 sm:w-4 transform rounded-full transition-all duration-200',
          )}
        />
      </Switch>
      {/* Label toggles the switch as well, for a larger click target. */}
      <p
        onClick={() => setCopilotEnabled(!copilotEnabled)}
        className={cn(
          'text-xs font-medium transition-colors duration-150 ease-in-out',
          copilotEnabled
            ? 'text-[#24A0ED]'
            : 'text-white/50 group-hover:text-white',
        )}
      >
        Copilot
      </p>
    </div>
  );
};