Working with Chat, context only to Rome, Gradio
app.ipynb
CHANGED
@@ -9,7 +9,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 9,
    "metadata": {},
    "outputs": [
     {
@@ -54,18 +54,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 10,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\pinecone\\index.py:4: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-      " from tqdm.autonotebook import tqdm\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import os\n",
     "import pinecone\n",
@@ -76,9 +67,19 @@
     "import gradio as gr"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Setup Vector Store\n",
+    "There are two vector stores in Pinecone (hence the two API Keys). Each has a separate knowledge base\n",
+    "1. Roman history\n",
+    "2. A list of literature"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
     {
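The new markdown cell documents the split: one Pinecone index per course, each behind its own API key. A minimal sketch of how the `course` switch in the next cell could resolve to the matching index and key; the environment-variable and index names here are assumptions for illustration, not values from this commit:

```python
import os
import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

course = "Roman History"

# Hypothetical names: the notebook only shows that two API keys
# and two knowledge bases exist, not what they are called.
if course == "Roman History":
    api_key, index_name = os.environ["PINECONE_API_KEY_ROME"], "roman-history"
else:
    api_key, index_name = os.environ["PINECONE_API_KEY_LIT"], "literature"

# pinecone-client v2 / langchain 0.0.x era APIs, matching the imports above.
pinecone.init(api_key=api_key, environment=os.environ["PINECONE_ENVIRONMENT"])
vector_store = Pinecone.from_existing_index(index_name, OpenAIEmbeddings())
```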
@@ -95,7 +96,6 @@
    "source": [
     "embeddings = OpenAIEmbeddings()\n",
     "\n",
-    "\n",
     "course = \"Roman History\"\n",
     "if course == \"Roman History\":\n",
     "    print(f\"Using course: {course}\")\n",
@@ -123,20 +123,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 12,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'\\n\\nHow are you?'"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "llm = OpenAI(temperature=0)"
    ]
@@ -150,9 +139,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 13,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\components\\button.py:89: UserWarning: Using the update method is deprecated. Simply return a new object instead, e.g. `return gr.Button(...)` instead of `return gr.Button.update(...)`.\n",
+      " warnings.warn(\n"
+     ]
+    }
+   ],
    "source": [
     "from langchain.memory import ConversationBufferMemory\n",
     "from langchain.chains import ConversationalRetrievalChain\n",
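This cell only imports the pieces; the retrieval flow they set up looks roughly like the following sketch, assuming the `llm` and `vector_store` objects from the earlier cells:

```python
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

# Buffer memory stores the running dialogue under the key the chain expects.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

qa = ConversationalRetrievalChain.from_llm(
    llm=llm,                                # OpenAI(temperature=0) from above
    retriever=vector_store.as_retriever(),  # Pinecone-backed retriever
    memory=memory,
)

# With memory attached, each call only needs the new question.
result = qa({"question": "How did Caesar avenge Crassus?"})
print(result["answer"])
```

The execute_result in the next hunk (the Crassus/Parthians answer) is consistent with exactly this kind of call against the Roman History index.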
@@ -163,7 +161,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
@@ -172,7 +170,7 @@
       "' \\nCrassus was a Roman general who was killed in battle by the Parthians. He was killed while trying to extend the Roman Empire into the Middle East. Ceasar was avenging Crassus by trying to defeat the Parthians and expand the Roman Empire. He was also trying to avenge the death of his friend and mentor.'"
      ]
     },
-    "execution_count":
+    "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -197,140 +195,55 @@
    "metadata": {},
    "source": [
     "# UI\n",
-    "https://medium.com/@gabriel_renno/how-to-build-a-gpt3-5-powered-chatbot-for-your-landing-page-with-langchain-and-gradio-1236ddfb0cf1"
+    "1. https://medium.com/@gabriel_renno/how-to-build-a-gpt3-5-powered-chatbot-for-your-landing-page-with-langchain-and-gradio-1236ddfb0cf1\n",
+    "2. https://github.com/RajKKapadia/YouTube-Pinecone-Demo"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 49,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Running on local URL: http://127.0.0.1:7862\n",
-      "\n",
-      "To create a public link, set `share=True` in `launch()`.\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/plain": []
-     },
-     "execution_count": 40,
-     "metadata": {},
-     "output_type": "execute_result"
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\components\\button.py:89: UserWarning: Using the update method is deprecated. Simply return a new object instead, e.g. `return gr.Button(...)` instead of `return gr.Button.update(...)`.\n",
-      "  warnings.warn(\n",
-      "Traceback (most recent call last):\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\queueing.py\", line 406, in call_prediction\n",
-      "    output = await route_utils.call_process_api(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\route_utils.py\", line 226, in call_process_api\n",
-      "    output = await app.get_blocks().process_api(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\blocks.py\", line 1554, in process_api\n",
-      "    result = await self.call_function(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\blocks.py\", line 1206, in call_function\n",
-      "    prediction = await utils.async_iteration(iterator)\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\utils.py\", line 517, in async_iteration\n",
-      "    return await iterator.__anext__()\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\utils.py\", line 621, in asyncgen_wrapper\n",
-      "    async for response in f(*args, **kwargs):\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\chat_interface.py\", line 424, in _stream_fn\n",
-      "    first_response = await async_iteration(generator)\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\utils.py\", line 517, in async_iteration\n",
-      "    return await iterator.__anext__()\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\utils.py\", line 510, in __anext__\n",
-      "    return await anyio.to_thread.run_sync(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n",
-      "    return await get_asynclib().run_sync_in_worker_thread(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
-      "    return await future\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n",
-      "    result = context.run(func, *args)\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\gradio\\utils.py\", line 493, in run_sync_iterator_async\n",
-      "    return next(iterator)\n",
-      "  File \"C:\\Users\\Garth Raulstone\\AppData\\Local\\Temp\\ipykernel_23576\\3750532605.py\", line 13, in predict\n",
-      "    response = openai.ChatCompletion.create(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\openai\\api_resources\\chat_completion.py\", line 25, in create\n",
-      "    return super().create(*args, **kwargs)\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\openai\\api_resources\\abstract\\engine_api_resource.py\", line 155, in create\n",
-      "    response, _, api_key = requestor.request(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\openai\\api_requestor.py\", line 289, in request\n",
-      "    result = self.request_raw(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\openai\\api_requestor.py\", line 591, in request_raw\n",
-      "    abs_url, headers, data = self._prepare_request_raw(\n",
-      "  File \"c:\\sc\\ai\\rag-demo-1\\.venv\\lib\\site-packages\\openai\\api_requestor.py\", line 563, in _prepare_request_raw\n",
-      "    data = json.dumps(params).encode()\n",
-      "  File \"C:\\Users\\Garth Raulstone\\AppData\\Local\\Programs\\Python\\Python310\\lib\\json\\__init__.py\", line 231, in dumps\n",
-      "    return _default_encoder.encode(obj)\n",
-      "  File \"C:\\Users\\Garth Raulstone\\AppData\\Local\\Programs\\Python\\Python310\\lib\\json\\encoder.py\", line 199, in encode\n",
-      "    chunks = self.iterencode(o, _one_shot=True)\n",
-      "  File \"C:\\Users\\Garth Raulstone\\AppData\\Local\\Programs\\Python\\Python310\\lib\\json\\encoder.py\", line 257, in iterencode\n",
-      "    return _iterencode(o, 0)\n",
-      "  File \"C:\\Users\\Garth Raulstone\\AppData\\Local\\Programs\\Python\\Python310\\lib\\json\\encoder.py\", line 179, in default\n",
-      "    raise TypeError(f'Object of type {o.__class__.__name__} '\n",
-      "TypeError: Object of type Pinecone is not JSON serializable\n"
-     ]
-    }
-   ],
    "source": [
-    "
-    "
-    "\n",
-    "
-    "\n",
-    "
-    "
-    "    for human, assistant in history:\n",
-    "        history_openai_format.append({\"role\": \"user\", \"content\": human })\n",
-    "        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n",
-    "    history_openai_format.append({\"role\": \"user\", \"content\": message})\n",
-    "\n",
-    "    response = openai.ChatCompletion.create(\n",
-    "        model='gpt-3.5-turbo',\n",
-    "        messages= history_openai_format,\n",
-    "        temperature=1.0,\n",
-    "        stream=True,\n",
-    "        vector_store=vector_store\n",
-    "    )\n",
-    "\n",
-    "    partial_message = \"\"\n",
-    "    for chunk in response:\n",
-    "        if len(chunk['choices'][0]['delta']) != 0:\n",
-    "            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n",
-    "            yield partial_message\n",
+    "# RajKKapadia's\n",
+    "from langchain.chains import ConversationalRetrievalChain\n",
+    "from langchain.chat_models import ChatOpenAI\n",
+    "from langchain.vectorstores import Pinecone\n",
+    "from langchain.embeddings.openai import OpenAIEmbeddings\n",
+    "from langchain.memory import ConversationBufferMemory\n",
+    "import pinecone\n",
     "\n",
-    "
+    "def create_conversation(query: str, chat_history: list) -> tuple:\n",
+    "    try:\n",
+    "        memory = ConversationBufferMemory(\n",
+    "            memory_key='chat_history',\n",
+    "            return_messages=False\n",
+    "        )\n",
+    "        cqa = ConversationalRetrievalChain.from_llm(\n",
+    "            llm=ChatOpenAI(temperature=0.0,\n",
+    "                           openai_api_key=os.environ['OPENAI_API_KEY']),\n",
+    "            retriever=vector_store.as_retriever(search_kwargs={\"k\": 5}),\n",
+    "            memory=memory,\n",
+    "            get_chat_history=lambda h: h,\n",
+    "        )\n",
+    "        result = cqa({'question': query, 'chat_history': chat_history})\n",
+    "        chat_history.append((query, result['answer']))\n",
+    "        return '', chat_history\n",
+    "    except Exception as e:\n",
+    "        chat_history.append((query, e))\n",
+    "        return '', chat_history"
    ]
   },
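The deleted traceback pinpoints why this cell was replaced: the old `predict` function passed `vector_store=vector_store` straight into `openai.ChatCompletion.create`, and every keyword given there is JSON-encoded into the request body, so the Pinecone object raised `TypeError: Object of type Pinecone is not JSON serializable`. A sketch of the usual repair for the pre-1.0 `openai` client, retrieving context separately so only strings reach the API; the system-prompt wiring below is an assumption for illustration, not code from this commit:

```python
import openai

def predict(message, history):
    # Query Pinecone ourselves instead of handing the vector store
    # (a non-serializable Python object) to the OpenAI API.
    docs = vector_store.similarity_search(message, k=5)
    context = "\n\n".join(d.page_content for d in docs)

    history_openai_format = [
        # Assumed prompt shape: prepend retrieved context as a system message.
        {"role": "system", "content": f"Answer using this context:\n{context}"}
    ]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=history_openai_format,
        temperature=1.0,
        stream=True,  # no vector_store kwarg: only JSON-serializable fields
    )

    partial_message = ""
    for chunk in response:
        if len(chunk["choices"][0]["delta"]) != 0:
            partial_message += chunk["choices"][0]["delta"]["content"]
            yield partial_message
```

The `create_conversation` function added in the same hunk sidesteps the problem entirely by letting `ConversationalRetrievalChain` own the retrieval step.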
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 53,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Running on local URL: http://127.0.0.1:
+      "Running on local URL: http://127.0.0.1:7886\n",
       "\n",
       "To create a public link, set `share=True` in `launch()`.\n"
      ]
@@ -338,7 +251,7 @@
     {
      "data": {
       "text/html": [
-       "<div><iframe src=\"http://127.0.0.1:
+       "<div><iframe src=\"http://127.0.0.1:7886/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
       ],
       "text/plain": [
        "<IPython.core.display.HTML object>"
@@ -346,35 +259,25 @@
      },
      "metadata": {},
      "output_type": "display_data"
-     },
-     {
-      "data": {
-       "text/plain": []
-      },
-      "execution_count": 3,
-      "metadata": {},
-      "output_type": "execute_result"
     }
    ],
    "source": [
-    "
-    "    return \"Your question is \" + question + \"!!\"\n",
+    "import gradio as gr\n",
     "\n",
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "with gr.Blocks() as demo:\n",
+    "    gr.Markdown(\"Chat to docs\")\n",
+    "    chatbot = gr.Chatbot(label='Talk to the Document')\n",
+    "    msg = gr.Textbox()\n",
+    "\n",
+    "    submitBtn = gr.Button(value=\"Submit\")\n",
+    "    clear = gr.ClearButton([msg, chatbot])\n",
+    "\n",
+    "    msg.submit(create_conversation, [msg, chatbot], [msg, chatbot])\n",
+    "    submitBtn.click(create_conversation, [msg, chatbot], [msg, chatbot])\n",
     "\n",
-    "
+    "if __name__ == '__main__':\n",
+    "    demo.launch(show_error=True)"
    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {},
-    "source": []
   }
  ],
  "metadata": {
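In the new UI cell, `create_conversation` returns `('', chat_history)`, and the `[msg, chatbot]` output list maps that tuple onto the textbox and the chat window, clearing the input and refreshing the history in one step. One caveat: the `except` branch appends the raw exception object to `chat_history`, which Gradio then has to render and may fail to serialize; coercing it to a string is the safer pattern. A sketch keeping the signature from the cell above, where `build_chain()` stands in for the chain setup shown there (a hypothetical helper, not a name from this commit):

```python
def create_conversation(query: str, chat_history: list) -> tuple:
    try:
        cqa = build_chain()  # hypothetical: the ConversationalRetrievalChain setup above
        result = cqa({'question': query, 'chat_history': chat_history})
        chat_history.append((query, result['answer']))
    except Exception as e:
        # str(e): the Chatbot component renders text; a raw Exception
        # object can break Gradio's JSON serialization of the update.
        chat_history.append((query, str(e)))
    return '', chat_history
```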