sohoso committed (verified)
Commit d137d29 · 1 Parent(s): 8f06bac

Update app.py

Files changed (1)
  1. app.py +42 -30
app.py CHANGED
@@ -1,4 +1,11 @@
-import time, os, multiprocessing, torch, requests, asyncio, json, aiohttp
+import time
+import os
+import multiprocessing
+import torch
+import requests
+import asyncio
+import json
+import aiohttp
 from minivectordb.embedding_model import EmbeddingModel
 from minivectordb.vector_database import VectorDatabase
 from text_util_en_pt.cleaner import structurize_text, detect_language, Language
@@ -24,7 +31,7 @@ def index_and_search(query, text):
 
     # Indexing
     vector_db = VectorDatabase()
-    sentences = [ s['sentence'] for s in structurize_text(text)]
+    sentences = [s['sentence'] for s in structurize_text(text)]
 
     for idx, sentence in enumerate(sentences):
         sentence_embedding = model.extract_embeddings(sentence)
@@ -34,7 +41,7 @@ def index_and_search(query, text):
 
     # Retrieval
     start = time.time()
-    search_results = vector_db.find_most_similar(query_embedding, k = 30)
+    search_results = vector_db.find_most_similar(query_embedding, k=30)
     retrieval_time = time.time() - start
     return '\n'.join([s['sentence'] for s in search_results[2]]), embedding_time, retrieval_time
 
@@ -45,21 +52,26 @@ def generate_search_terms(message, lang):
     prompt = f"From the following text, generate some search terms: \"{message}\"\nYour answer should be just the most appropriate search term, and nothing else."
 
     url = "https://openrouter.ai/api/v1/chat/completions"
-    headers = { "Content-Type": "application/json",
-                "Authorization": f"Bearer {openrouter_key}" }
-    body = { "stream": False,
-             "models": [
-                 "mistralai/mistral-7b-instruct:free",
-                 "openchat/openchat-7b:free"
-             ],
-             "route": "fallback",
-             "max_tokens": 1024,
-             "messages": [
-                 {"role": "user", "content": prompt}
-             ] }
+    headers = {"Content-Type": "application/json",
+               "Authorization": f"Bearer {openrouter_key}"}
+    body = {"stream": False,
+            "models": [
+                "mistralai/mistral-7b-instruct:free",
+                "openchat/openchat-7b:free"
+            ],
+            "route": "fallback",
+            "max_tokens": 1024,
+            "messages": [
+                {"role": "user", "content": prompt}
+            ]}
 
     response = requests.post(url, headers=headers, json=body)
-    return response.json()['choices'][0]['message']['content']
+    response_json = response.json()
+    choices = response_json.get('choices', [])
+    if choices:
+        return choices[0].get('message', {}).get('content', 'Default content if key is missing')
+    else:
+        raise ValueError('No choices available in the response')
 
 async def predict(message, history):
     full_response = ""
@@ -120,18 +132,18 @@ async def predict(message, history):
     full_response += "\nResponse: "
 
     url = "https://openrouter.ai/api/v1/chat/completions"
-    headers = { "Content-Type": "application/json",
-                "Authorization": f"Bearer {openrouter_key}" }
-    body = { "stream": True,
-             "models": [
-                 "mistralai/mistral-7b-instruct:free",
-                 "openchat/openchat-7b:free"
-             ],
-             "route": "fallback",
-             "max_tokens": 1024,
-             "messages": [
-                 {"role": "user", "content": prompt}
-             ] }
+    headers = {"Content-Type": "application/json",
+               "Authorization": f"Bearer {openrouter_key}"}
+    body = {"stream": True,
+            "models": [
+                "mistralai/mistral-7b-instruct:free",
+                "openchat/openchat-7b:free"
+            ],
+            "route": "fallback",
+            "max_tokens": 1024,
+            "messages": [
+                {"role": "user", "content": prompt}
+            ]}
 
     async with aiohttp.ClientSession() as session:
         async with session.post(url, headers=headers, json=body) as response:
@@ -168,6 +180,6 @@ gr.ChatInterface(
         'When did the first human land on the moon?',
         'Liquid vs solid vs gas?',
         'What is the capital of France?',
-        'Why does Brazil has a high tax rate?'
+        'Why does Brazil have a high tax rate?'
     ]
-).launch()
+).launch()
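
Note: the main behavioral change here is in generate_search_terms, which no longer indexes response.json()['choices'][0] directly and instead falls back or raises when the 'choices' key is missing. A minimal standalone sketch of that request/response pattern follows, assuming a hypothetical call_openrouter helper and an OPENROUTER_API_KEY environment variable (neither name is taken from app.py):

    import os
    import requests

    # Hypothetical helper mirroring the pattern in this diff: two free models with
    # OpenRouter's "fallback" routing, and defensive parsing of the JSON body so a
    # missing 'choices' key raises a clear error instead of a KeyError.
    def call_openrouter(prompt: str, api_key: str) -> str:
        url = "https://openrouter.ai/api/v1/chat/completions"
        headers = {"Content-Type": "application/json",
                   "Authorization": f"Bearer {api_key}"}
        body = {"stream": False,
                "models": [
                    "mistralai/mistral-7b-instruct:free",
                    "openchat/openchat-7b:free"
                ],
                "route": "fallback",
                "max_tokens": 1024,
                "messages": [
                    {"role": "user", "content": prompt}
                ]}

        response = requests.post(url, headers=headers, json=body, timeout=60)
        response_json = response.json()
        choices = response_json.get('choices', [])
        if choices:
            return choices[0].get('message', {}).get('content', '')
        raise ValueError('No choices available in the response')

    if __name__ == "__main__":
        # Assumes the key comes from an environment variable; app.py reads its own
        # openrouter_key elsewhere, which is not shown in this diff.
        print(call_openrouter("What is the capital of France?", os.environ["OPENROUTER_API_KEY"]))

The "route": "fallback" field asks OpenRouter to try the listed models in order, so a failure of the first free model does not fail the whole request.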