thejagstudio committed
Commit ba5136e · verified · 1 parent: 6557919

Upload 13 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ chroma/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["python", "main.py"]
README.md CHANGED
@@ -1,11 +1,9 @@
- ---
- title: Narayangpt
- emoji: 🏢
- colorFrom: blue
- colorTo: yellow
- sdk: docker
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Narayangpt
+ emoji: 😻
+ colorFrom: gray
+ colorTo: green
+ sdk: docker
+ pinned: false
+ license: cc-by-3.0
+ ---
chroma/468d1e28-05e8-41cd-a9e6-27b3066ef48a/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:998e0cca15fc892538d911e028c1661336ba6c465037bba4619908939edcd98b
+ size 29652000
chroma/468d1e28-05e8-41cd-a9e6-27b3066ef48a/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee810497fc5b8c99f0b6ffea36b49f5aaa802fb1fdd969845acc766e4cc33727
+ size 100
chroma/468d1e28-05e8-41cd-a9e6-27b3066ef48a/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bb2921a6158c0c3d90cae154d9915b3029b3ba1cea6b2d2ab909c58579f63ca
+ size 466769
chroma/468d1e28-05e8-41cd-a9e6-27b3066ef48a/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:869697aa4f1bec42bd2dd030b8f950ec956cfc77ff973e28a600915f446b1a5d
+ size 28000
chroma/468d1e28-05e8-41cd-a9e6-27b3066ef48a/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0b0384bec817cb17e10361b7a9f530011d5d25a83b9b56a16290ce9b1315b9d
+ size 62408
chroma/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:460fccfb79271f6ad2c8d8906810f61f495ff33fd0f9ae5a5827870747aab6f2
+ size 124534784
databaseCreator.py ADDED
@@ -0,0 +1,151 @@
+ import argparse
+ import os
+ import shutil
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain.schema.document import Document
+ from langchain_community.vectorstores import Chroma
+ from langchain_community.embeddings.bedrock import BedrockEmbeddings
+ import json
+ import requests
+ from chromadb import Documents, EmbeddingFunction, Embeddings
+
+ CHROMA_PATH = "chroma"
+ DATA_PATH = "pdfs"
+
+
+ class MyEmbeddingFunction(EmbeddingFunction):
+
+     def embed_documents(self, input: Documents) -> Embeddings:
+         for i in range(5):
+             try:
+                 embeddings = []
+                 url = "https://api.deepinfra.com/v1/inference/BAAI/bge-large-en-v1.5"
+
+                 payload = json.dumps({
+                     "inputs": input
+                 })
+                 headers = {
+                     'Accept': 'application/json, text/plain, */*',
+                     'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+                     'Connection': 'keep-alive',
+                     'Content-Type': 'application/json',
+                     'Origin': 'https://deepinfra.com',
+                     'Referer': 'https://deepinfra.com/',
+                     'Sec-Fetch-Dest': 'empty',
+                     'Sec-Fetch-Mode': 'cors',
+                     'Sec-Fetch-Site': 'same-site',
+                     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                     'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                     'sec-ch-ua-mobile': '?0',
+                     'sec-ch-ua-platform': '"Windows"'
+                 }
+
+                 response = requests.request("POST", url, headers=headers, data=payload)
+                 return response.json()["embeddings"]
+             except:
+                 pass
+
+
+ def main():
+
+     # Check if the database should be cleared (using the --clear flag).
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--reset", action="store_true", help="Reset the database.")
+     args = parser.parse_args()
+     if args.reset:
+         print("✨ Clearing Database")
+         clear_database()
+
+     # Create (or update) the data store.
+     documents = load_documents()
+     chunks = split_documents(documents)
+     add_to_chroma(chunks)
+
+
+ def load_documents():
+     print("📚 Loading Documents")
+     document_loader = PyPDFDirectoryLoader(DATA_PATH)
+     return document_loader.load()
+
+
+ def split_documents(documents: list[Document]):
+     print("🔪 Splitting Documents")
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=4000,
+         chunk_overlap=100,
+         length_function=len,
+         is_separator_regex=True
+     )
+     return text_splitter.split_documents(documents)
+
+
+ def add_to_chroma(chunks: list[Document]):
+     print("🔗 Adding to Chroma")
+     # Load the existing database.
+     custom_embeddings = MyEmbeddingFunction()
+     db = Chroma(
+         persist_directory=CHROMA_PATH, embedding_function=custom_embeddings
+     )
+
+     # Calculate Page IDs.
+     chunks_with_ids = calculate_chunk_ids(chunks)
+
+     # Add or Update the documents.
+     existing_items = db.get(include=[])  # IDs are always included by default
+     existing_ids = set(existing_items["ids"])
+     print(f"Number of existing documents in DB: {len(existing_ids)}")
+
+     # Only add documents that don't exist in the DB.
+     new_chunks = []
+     for chunk in chunks_with_ids:
+         if chunk.metadata["id"] not in existing_ids:
+             new_chunks.append(chunk)
+
+     if len(new_chunks):
+         print(f"👉 Adding new documents: {len(new_chunks)}")
+         new_chunk_ids = [chunk.metadata["id"] for chunk in new_chunks]
+         for i in range(0, len(new_chunks), 100):
+             try:
+                 db.add_documents(new_chunks[i:i+100], ids=new_chunk_ids[i:i+100])
+                 db.persist()
+                 print(f"Added {i+100} documents")
+             except:
+                 pass
+     else:
+         print("✅ No new documents to add")
+
+
+ def calculate_chunk_ids(chunks):
+
+     last_page_id = None
+     current_chunk_index = 0
+
+     for chunk in chunks:
+         source = chunk.metadata.get("source")
+         page = chunk.metadata.get("page")
+         current_page_id = f"{source}:{page}"
+
+         # If the page ID is the same as the last one, increment the index.
+         if current_page_id == last_page_id:
+             current_chunk_index += 1
+         else:
+             current_chunk_index = 0
+
+         # Calculate the chunk ID.
+         chunk_id = f"{current_page_id}:{current_chunk_index}"
+         last_page_id = current_page_id
+
+         # Add it to the page meta-data.
+         chunk.metadata["id"] = chunk_id
+
+     return chunks
+
+
+ def clear_database():
+     if os.path.exists(CHROMA_PATH):
+         shutil.rmtree(CHROMA_PATH)
+
+
+ if __name__ == "__main__":
+     main()
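For reference, the calculate_chunk_ids helper above derives stable IDs of the form source:page:chunk_index, resetting the index whenever the page changes. A minimal sketch of that behaviour follows; the PDF file name is hypothetical.

from langchain.schema.document import Document
from databaseCreator import calculate_chunk_ids

# Two chunks from page 0 of a hypothetical PDF, then one chunk from page 1.
chunks = [
    Document(page_content="first chunk", metadata={"source": "pdfs/example.pdf", "page": 0}),
    Document(page_content="second chunk", metadata={"source": "pdfs/example.pdf", "page": 0}),
    Document(page_content="third chunk", metadata={"source": "pdfs/example.pdf", "page": 1}),
]

for chunk in calculate_chunk_ids(chunks):
    print(chunk.metadata["id"])
# Expected IDs:
# pdfs/example.pdf:0:0
# pdfs/example.pdf:0:1
# pdfs/example.pdf:1:0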
encodingGen.py ADDED
@@ -0,0 +1,42 @@
+ import requests
+ import json
+
+ with open("embeddingData.json", "r") as f:
+     data = json.loads(f.read())
+
+ for i in range(0,len(data),10):
+     newData = []
+     for j in range(i,i+10):
+         try:
+             newData.append(data[j]["text"])
+         except:
+             pass
+
+     url = "https://api.deepinfra.com/v1/inference/BAAI/bge-large-en-v1.5"
+
+     payload = json.dumps({
+         "inputs": newData
+     })
+     headers = {
+         'Accept': 'application/json, text/plain, */*',
+         'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+         'Connection': 'keep-alive',
+         'Content-Type': 'application/json',
+         'Origin': 'https://deepinfra.com',
+         'Referer': 'https://deepinfra.com/',
+         'Sec-Fetch-Dest': 'empty',
+         'Sec-Fetch-Mode': 'cors',
+         'Sec-Fetch-Site': 'same-site',
+         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+         'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+         'sec-ch-ua-mobile': '?0',
+         'sec-ch-ua-platform': '"Windows"'
+     }
+
+     response = requests.request("POST", url, headers=headers, data=payload)
+     for j in range(len(response.json()["embeddings"])):
+         data[i+j]["embedding"] = response.json()["embeddings"][j]
+         print(data[i+j]["text"])
+
+ with open("embeddingData.json", "w") as f:
+     f.write(json.dumps(data, indent=4))
main.py ADDED
@@ -0,0 +1,232 @@
+ from flask import Flask, request, jsonify, render_template, Response
+ import os
+ import requests
+ import json
+ from scipy import spatial
+ from flask_cors import CORS
+ import random
+ import numpy as np
+ from langchain_chroma import Chroma
+ from chromadb import Documents, EmbeddingFunction, Embeddings
+
+
+ app = Flask(__name__)
+ CORS(app)
+
+
+ class MyEmbeddingFunction(EmbeddingFunction):
+
+     def embed_documents(self, input: Documents) -> Embeddings:
+         for i in range(5):
+             try:
+                 embeddings = []
+                 url = "https://api.deepinfra.com/v1/inference/BAAI/bge-large-en-v1.5"
+
+                 payload = json.dumps({
+                     "inputs": input
+                 })
+                 headers = {
+                     'Accept': 'application/json, text/plain, */*',
+                     'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+                     'Connection': 'keep-alive',
+                     'Content-Type': 'application/json',
+                     'Origin': 'https://deepinfra.com',
+                     'Referer': 'https://deepinfra.com/',
+                     'Sec-Fetch-Dest': 'empty',
+                     'Sec-Fetch-Mode': 'cors',
+                     'Sec-Fetch-Site': 'same-site',
+                     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                     'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                     'sec-ch-ua-mobile': '?0',
+                     'sec-ch-ua-platform': '"Windows"'
+                 }
+
+                 response = requests.request("POST", url, headers=headers, data=payload)
+                 return response.json()["embeddings"]
+             except:
+                 pass
+
+     def embed_query(self, input: Documents) -> Embeddings:
+         print(input)
+         for i in range(5):
+             try:
+                 embeddings = []
+                 url = "https://api.deepinfra.com/v1/inference/BAAI/bge-large-en-v1.5"
+
+                 payload = json.dumps({
+                     "inputs": [input]
+                 })
+                 headers = {
+                     'Accept': 'application/json, text/plain, */*',
+                     'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+                     'Connection': 'keep-alive',
+                     'Content-Type': 'application/json',
+                     'Origin': 'https://deepinfra.com',
+                     'Referer': 'https://deepinfra.com/',
+                     'Sec-Fetch-Dest': 'empty',
+                     'Sec-Fetch-Mode': 'cors',
+                     'Sec-Fetch-Site': 'same-site',
+                     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                     'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                     'sec-ch-ua-mobile': '?0',
+                     'sec-ch-ua-platform': '"Windows"'
+                 }
+
+                 response = requests.request("POST", url, headers=headers, data=payload)
+                 return response.json()["embeddings"][0]
+             except:
+                 pass
+
+ CHROMA_PATH = "chroma"
+ custom_embeddings = MyEmbeddingFunction()
+ db = Chroma(
+     persist_directory=CHROMA_PATH, embedding_function=custom_embeddings
+ )
+
+
+ def embeddingGen(query):
+     url = "https://api.deepinfra.com/v1/inference/BAAI/bge-large-en-v1.5"
+
+     payload = json.dumps({
+         "inputs": [query]
+     })
+     headers = {
+         'Accept': 'application/json, text/plain, */*',
+         'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+         'Connection': 'keep-alive',
+         'Content-Type': 'application/json',
+         'Origin': 'https://deepinfra.com',
+         'Referer': 'https://deepinfra.com/',
+         'Sec-Fetch-Dest': 'empty',
+         'Sec-Fetch-Mode': 'cors',
+         'Sec-Fetch-Site': 'same-site',
+         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+         'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+         'sec-ch-ua-mobile': '?0',
+         'sec-ch-ua-platform': '"Windows"'
+     }
+
+     response = requests.request("POST", url, headers=headers, data=payload)
+     return response.json()
+
+
+ def strings_ranked_by_relatedness(query, df, top_n=5):
+     def relatedness_fn(x, y):
+         x_norm = np.linalg.norm(x)
+         y_norm = np.linalg.norm(y)
+         return np.dot(x, y) / (x_norm * y_norm)
+
+     query_embedding_response = embeddingGen(query)
+     query_embedding = query_embedding_response["embeddings"][0]
+     strings_and_relatednesses = [
+         (row["text"], relatedness_fn(query_embedding, row["embedding"])) for row in df
+     ]
+     strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
+     strings, relatednesses = zip(*strings_and_relatednesses)
+     return strings[:top_n], relatednesses[:top_n]
+
+
+ @app.route("/api/gpt", methods=["POST"])
+ def gptRes():
+     data = request.get_json()
+     messages = data["messages"]
+     def inference():
+         url = "https://api.deepinfra.com/v1/openai/chat/completions"
+
+         payload = json.dumps({
+             "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+             "messages": messages,
+             "stream": True,
+             "max_tokens": 1024,
+         })
+         headers = {
+             'Accept-Language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+             'X-Deepinfra-Source': 'web-page',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"'
+         }
+
+         response = requests.request("POST", url, headers=headers, data=payload, stream=True)
+
+         for line in response.iter_lines(decode_unicode=True):
+             if line:
+                 # try:
+                 #     line = line.split("data:")[1]
+                 #     line = json.loads(line)
+                 #     yield line["choices"][0]["delta"]["content"]
+                 # except:
+                 #     yield ""
+                 yield line
+
+     return Response(inference(), content_type='text/event-stream')
+
+
+ @app.route("/", methods=["GET"])
+ def index():
+     return render_template("index.html")
+
+
+ @app.route("/api/getAPI", methods=["POST"])
+ def getAPI():
+     return jsonify({"API": random.choice(apiKeys)})
+
+
+ @app.route("/api/getContext", methods=["POST"])
+ def getContext():
+     global db
+     question = request.form["question"]
+     results = db.similarity_search_with_score(question, k=5)
+     context = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
+     sources = [doc.metadata.get("id", None) for doc, _score in results]
+     return jsonify({"context": context, "sources": sources})
+
+
+ @app.route("/api/audioGenerate", methods=["POST"])
+ def audioGenerate():
+     answer = request.form["answer"]
+     audio = []
+     for i in answer.split("\n"):
+         url = "https://deepgram.com/api/ttsAudioGeneration"
+
+         payload = json.dumps({
+             "text": i,
+             "model": "aura-asteria-en",
+             "demoType": "landing-page",
+             "params": "tag=landingpage-product-texttospeech"
+         })
+         headers = {
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9,gu;q=0.8,ru;q=0.7,hi;q=0.6',
+             'content-type': 'application/json',
+             'origin': 'https://deepgram.com',
+             'priority': 'u=1, i',
+             'referer': 'https://deepgram.com/',
+             'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
+         }
+
+         response = requests.request("POST", url, headers=headers, data=payload)
+         audio.append(response.json()["data"])
+     return jsonify({"audio": audio})
+
+
+ if __name__ == "__main__":
+     # app.run(debug=True)
+     from waitress import serve
+
+     serve(app, host="0.0.0.0", port=7860)
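For context, a minimal client sketch against the Flask routes above, assuming the container is running locally and reachable at http://localhost:7860 (the app binds to 0.0.0.0:7860); the question text is a placeholder.

import requests

BASE = "http://localhost:7860"  # assumed local address for the running container

# /api/getContext expects a form field "question" and returns retrieved context plus source IDs.
ctx = requests.post(f"{BASE}/api/getContext", data={"question": "example question"}).json()
print(ctx["sources"])

# /api/gpt expects JSON {"messages": [...]} and streams the upstream SSE response line by line.
messages = [
    {"role": "system", "content": "Answer using this context:\n" + ctx["context"]},
    {"role": "user", "content": "example question"},
]
with requests.post(f"{BASE}/api/gpt", json={"messages": messages}, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line:
            print(line)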
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ Flask
+ scipy
+ requests
+ Flask-Cors
+ pypdf
+ langchain
+ chromadb
+ pytest
+ langchain-community
+ langchain_chroma
+ waitress
+ uvicorn
templates/index.html ADDED
@@ -0,0 +1 @@
+ <!doctype html><html lang="en"><head><meta charset="utf-8"><link rel="icon" href="/images/logo.png"><meta name="viewport" content="width=device-width,initial-scale=1"><meta name="theme-color" content="#000000"><meta name="description" content="Web site created using create-react-app"><link rel="apple-touch-icon" href="/logo192.png"><link rel="manifest" href="/manifest.json"><title>NarayanGPT</title><script defer="defer" src="/static/js/main.99ba6a2a.js"></script><link href="/static/css/main.3346b154.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>