lamhieu committed
Commit 7b49081 · 1 Parent(s): ebed57e

chore: initialize the app

Files changed (4)
  1. README.md +26 -3
  2. app.py +828 -0
  3. requirements.txt +11 -0
  4. style.css +24 -0
README.md CHANGED
@@ -4,10 +4,33 @@ emoji: 🦀
  colorFrom: blue
  colorTo: green
  sdk: gradio
- sdk_version: 4.41.0
+ sdk_version: 4.36.1
  app_file: app.py
- pinned: false
+ pinned: true
+ header: mini
+ suggested_hardware: a10g-small
+ language:
+ - vi
+ - ko
+ - es
+ - pt
+ - zh
+ - fr
+ - it
+ - de
+ - ja
+ - ru
+ - pl
+ - nl
+ - hi
+ - tr
+ - id
+ - en
  license: other
+ license_name: ghost-open-llms
+ license_link: https://ghost-x.org/ghost-open-llms-license
+ tags:
+ - ghost
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # ~
app.py ADDED
@@ -0,0 +1,828 @@
+ # pylint: skip-file
+
+ import subprocess
+ import json
+ import requests
+ import zlib
+ from PIL import Image
+
+ subprocess.run(
+     "pip install flash-attn --no-build-isolation",
+     env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+     shell=True,
+ )
+
+ import os
+ from threading import Thread
+ from typing import Iterator
+
+ import gradio as gr
+ import spaces
+ import torch
+ import logging
+ import wikipedia
+ import time
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     AutoProcessor,
+     TextIteratorStreamer,
+ )
+ from transformers.dynamic_module_utils import get_imports
+ from bs4 import BeautifulSoup
+ from functools import lru_cache
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ MAX_MAX_NEW_TOKENS = 4096
+ DEFAULT_MAX_NEW_TOKENS = 1536
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
+
+ DEFAULT_SYSTEM_PROMPT = """\
+ You are a helpful and intelligent AI, developed by Ghost X and named Ghost 8B Beta, often referred to as Ghost Beta. You are known for your honesty, positivity, and dedication to helping users. Your strength is understanding their needs and making insightful suggestions based on user knowledge and interests. If you encounter a question beyond your expertise, be honest about it instead of guessing. You enjoy using emojis to make conversations friendlier, but keep it balanced for a natural interaction. Let's engage in a meaningful conversation, keeping in mind the user's language.
+
+ Rely on the context, such as the time, to offer responses that feel relevant and natural in daily life. Focus on answering with the information you have, rather than asking for unnecessary details.
+ """
+
+ # DEFAULT_SYSTEM_PROMPT = """\
+ # You are a helpful and intelligent AI, trained by Ghost X and named Ghost 8B Beta (often referred to as Ghost Beta).
+ # You're known for your honesty, spreading positivity, and always striving to assist users. Your expertise lies in understanding their needs and providing insightful suggestions, drawing upon your knowledge and interests. If a query exceeds your understanding, you'll be upfront and state you're unsure, avoiding fabricated responses. You enjoy incorporating emojis to enhance interactions, but maintain a balanced approach for a natural flow. Let's engage in a meaningful conversation, keeping in mind the user's language.
+ # """
+
+ # DEFAULT_SYSTEM_PROMPT = """\
+ # You are a helpful and intelligent AI, trained by Ghost X and named Ghost 8B Beta (often referred to as 8B Beta).
+ # You're known for your honesty, spreading positivity, and always striving to assist users. Your expertise lies in understanding their needs and providing insightful suggestions, drawing upon your knowledge and interests. If a query exceeds your understanding, you'll be upfront and state you're unsure, avoiding fabricated responses. You enjoy incorporating emojis to enhance interactions, but maintain a balanced approach for a natural flow. Let's engage in a meaningful conversation, keeping in mind the user's language.
+
+ # A guide to dealing with extremely complex questions or challenges. Follow these steps to solve them:
+ # 1. Deconstructing Complexity
+ # Imagine a puzzle with intricate pieces. I'll present a challenging question. Your task: Break down this question into smaller, distinct parts. Label each part with a specific theme or aspect related to the problem. This will help us understand the multifaceted nature of the query and prepare for a structured solution.
+ # 2. Reconstructing Insights
+ # Once we've successfully dissected the problem into manageable components, assemble these parts like a puzzle. Focus on identifying connections, potential overlaps, and key information from each theme. The goal is to reconstruct a cohesive, well-rounded answer that addresses the original complexity of the question.
+ # """
+
+ HEAD = """
+ <script>
+ function schedule_updates() {
+   const client_info_element = document.querySelector("#client_info textarea");
+   client_info_element.value = "The current time is " + new Date().toLocaleString('en-US', {
+     dateStyle: 'full',
+     timeStyle: 'short',
+   })
+   client_info_element.dispatchEvent(new Event('input'));
+ }
+
+ function bootstrap() {
+   setInterval(schedule_updates, 1000);
+ };
+
+ bootstrap();
+ </script>
+ """
+
+ DESCRIPTION = """\
+ # Ghost 8B Beta (β, 8k)
+
+ **Ghost 8B Beta** outperforms leading models like Llama 3.1 8B Instruct and GPT-3.5 Turbo in lc_winrate scores. It also surpasses Claude 3 Opus, Claude 3 Sonnet, GPT-4, and Mistral Large in AlpacaEval 2.0 winrate scores. The model offers two context length versions: [8k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k) and [128k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k), both with built-in multilingual function support. See details about the model [here](https://ghost-x.org/docs/models/ghost-8b-beta), download from [HuggingFace](https://huggingface.co/ghost-x/ghost-8b-beta-1608).
+
+ Supported languages: 🇬🇧 English, 🇻🇳 Vietnamese, 🇰🇷 Korean, 🇪🇸 Spanish, 🇵🇹 Portuguese, 🇨🇳 Chinese, 🇫🇷 French, 🇮🇹 Italian, 🇩🇪 German, 🇯🇵 Japanese, 🇷🇺 Russian, 🇵🇱 Polish, 🇳🇱 Dutch, 🇮🇳 Hindi, 🇹🇷 Turkish, 🇮🇩 Indonesian.
+
+ Note: attached images are described by a separate captioning model rather than being processed directly by Ghost 8B Beta.
+ """
+
+
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+   <h1 style="font-size: 26px; margin-bottom: 2px; opacity: 0.20;">👋 Welcome to the Ghost 8B Beta Playground! 🎉</h1>
+   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.10;">Ask me anything and let's have some fun! 🤔💡</p>
+ </div>
+ """
+
+ LICENSE = """
+ <p/>
+
+ ---
+ Ghost 8B Beta may give inaccurate information, including information about people, so please verify Ghost 8B Beta's answers. [Ghost 8B Beta](https://ghost-x.org/docs/models/ghost-8b-beta/) by [Ghost X](https://ghost-x.org).
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ def workaround_fixed_get_imports(filename: str | os.PathLike) -> list[str]:
+     """
+     Workaround for transformers' get_imports so Florence-2 loads without flash_attn.
+
+     @args:
+         filename (str | os.PathLike): The filename or path to the file.
+
+     @returns:
+         list[str]: The list of imports.
+
+     @remarks:
+         - This function is a workaround for transformers' get_imports function.
+         - It checks if the filename ends with "/modeling_florence2.py".
+         - If it doesn't, it calls the original get_imports function.
+         - If it does, it calls the original get_imports function and removes the "flash_attn" import.
+
+     @usage:
+         ```python
+         from unittest.mock import patch
+         image_torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+         with patch(
+             "transformers.dynamic_module_utils.get_imports", workaround_fixed_get_imports
+         ):
+         ```
+     """
+
+     if not str(filename).endswith("/modeling_florence2.py"):
+         return get_imports(filename)
+     imports = get_imports(filename)
+     imports.remove("flash_attn")
+     return imports
+
+
+ if torch.cuda.is_available():
+     hf_secret = os.getenv("HF_TOKEN", None)
+     attn_implementation = "flash_attention_2"
+
+     chat_model_id = "ghost-x/ghost-8b-beta-1608"
+     chat_device = torch.device("cuda")
+     chat_model = AutoModelForCausalLM.from_pretrained(
+         chat_model_id,
+         device_map="auto",
+         torch_dtype=torch.bfloat16,
+         attn_implementation=attn_implementation,
+         trust_remote_code=True,
+         token=hf_secret,
+     )
+     chat_tokenizer = AutoTokenizer.from_pretrained(
+         chat_model_id,
+         trust_remote_code=True,
+         token=hf_secret,
+     )
+
+     image_model_id = "microsoft/Florence-2-large"
+     # image_device = "cuda" if torch.cuda.is_available() else "cpu"
+     # image_torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+     image_device = "cpu"
+     image_torch_dtype = torch.float32
+     image_model = (
+         AutoModelForCausalLM.from_pretrained(
+             image_model_id,
+             torch_dtype=image_torch_dtype,
+             trust_remote_code=True,
+             token=hf_secret,
+         )
+         .to(image_device)
+         .eval()
+     )
+     image_processor = AutoProcessor.from_pretrained(
+         image_model_id,
+         trust_remote_code=True,
+         token=hf_secret,
+     )
+
+
+ waiting_tools_timeout = 5
+ supported_tools = json.dumps(
+     [
+         {
+             "type": "function",
+             "function": {
+                 "name": "search_on_internet",
+                 "description": "Use this tool to search for information on the internet to answer questions you are unsure about, don't know or need the latest information (e.g. news, reports, companies, people,...) to give the most accurate results. Note: can only be used or ignored, not asked again",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "keyword": {
+                             "type": "string",
+                             "description": "Search keywords, rephrase to optimize search results based on questions suitable to the specified search type.",
+                             "required": True,
+                         },
+                         "type": {
+                             "type": "string",
+                             "description": "Search type, based on the question to determine whether to search for it in 'wikipedia' or 'google', prefer to use wikipedia for information about events, history and people.",
+                             "enum": ["wikipedia", "google"],
+                             "default": "google",
+                             "required": True,
+                         },
+                         "language": {
+                             "type": "string",
+                             "description": "Search language, is the user language code with 2 letters, e.g: vi = vietnamese, en = english.",
+                             "default": "en",
+                             "required": True,
+                         },
+                     },
+                 },
+             },
+         }
+     ],
+     ensure_ascii=False,
+ )
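# Note: for reference, a tool call is expected to arrive as the model's raw output and
# parse as JSON with exactly the keys that generate_chat_responses reads further below,
# e.g. (illustrative values):
# {"type": "function", "name": "search_on_internet",
#  "arguments": {"keyword": "Ghost 8B Beta", "type": "google", "language": "en"}}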
+
+
+ @lru_cache(maxsize=128)
+ def extract_text_from_webpage(html_content):
+     """
+     Extracts visible text from an HTML webpage.
+
+     @args:
+         html_content (str): The HTML content of the webpage.
+
+     @returns:
+         str: The visible text extracted from the webpage.
+
+     @remarks:
+         - This function uses the BeautifulSoup library to parse the HTML content.
+         - It removes certain tags (script, style, header, footer, nav, form, svg) from the parsed HTML.
+         - The remaining visible text is then extracted using the `get_text` method of BeautifulSoup.
+         - The extracted text is stripped of leading/trailing whitespace and separated by a single space.
+     """
+
+     soup = BeautifulSoup(html_content, "html.parser")
+     for tag in soup(["script", "style", "header", "footer", "nav", "form", "svg"]):
+         tag.extract()
+     visible_text = soup.get_text(strip=True, separator=" ")
+     return visible_text
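# Quick illustration (hypothetical input): script tags are dropped, so
# extract_text_from_webpage("<p>Hello</p><script>x()</script>") -> "Hello".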
+
+
+ def search_with_wikipedia(
+     query: str,
+     language: str = "en",
+ ):
+     """
+     Search for a given query on Wikipedia and return the summary.
+
+     @args:
+         query (str): The search query.
+         language (str, optional): The language code for the Wikipedia page. Defaults to "en".
+
+     @returns:
+         list: A list containing the summary of the Wikipedia page.
+
+     @remarks:
+         - This function uses the Wikipedia API to search for the given query.
+         - The language parameter determines the language of the Wikipedia page to search.
+         - If the search is successful, the function returns a list containing the summary of the page.
+         - If an exception occurs during the search, an empty list is returned.
+     """
+
+     all_results = []
+     try:
+         wikipedia.set_lang(language)
+         all_results.append(wikipedia.summary(query))
+     except Exception as e:
+         pass
+     return all_results
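# Example (hypothetical query): search_with_wikipedia("Python (programming language)")
# returns a one-element list with the article summary, or [] if the lookup fails.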
+
+
+ def search_with_google(
+     query: str,
+     num_results: int = 3,
+     timeout: int = 5,
+     language: str = "en",
+     ssl_verify: bool = None,
+ ):
+     """
+     Searches Google for the given query and returns a list of search results.
+
+     @args:
+         query (str): The search query.
+         num_results (int, optional): The number of search results to retrieve. Defaults to 3.
+         timeout (int, optional): The timeout value for the HTTP requests. Defaults to 5.
+         language (str, optional): The language for the search results. Defaults to "en".
+         ssl_verify (bool, optional): Whether to verify SSL certificates. Defaults to None.
+
+     @returns:
+         list: A list of dictionaries containing the link and visible text of each search result.
+
+     @remarks:
+         - This function uses the requests library to send HTTP requests to Google.
+         - It sets the User-Agent header to mimic a Firefox browser.
+         - The search results are retrieved from the HTML response using BeautifulSoup.
+         - Each search result is represented as a dictionary with "link" and "text" keys.
+         - The "link" key contains the URL of the search result.
+         - The "text" key contains the visible text extracted from the search result webpage.
+         - If the visible text exceeds 4096 characters, it is truncated to that length.
+         - If an error occurs while fetching or processing a search result, it is printed and ignored.
+     """
+
+     # Initialize an empty list to store the search results
+     all_results = []
+
+     # Define the maximum number of characters per page
+     max_chars_per_page = 4096
+
+     # Create a session object to send HTTP requests
+     with requests.Session() as session:
+         # Send a GET request to Google search with the specified query parameters
+         resp = session.get(
+             url="https://www.google.com/search",
+             headers={
+                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"
+             },
+             params={
+                 "q": query,
+                 "num": num_results,
+                 "udm": 14,
+                 "hl": language,
+             },
+             timeout=timeout,
+             verify=ssl_verify,
+         )
+
+         # Raise an exception if the response status code is not successful
+         resp.raise_for_status()
+
+         # Parse the HTML response using BeautifulSoup
+         soup = BeautifulSoup(resp.text, "html.parser")
+
+         # Find all the result blocks in the HTML
+         result_block = soup.find_all("div", attrs={"class": "g"})
+
+         # Iterate over each result block
+         for result in result_block:
+             # Find the link element within the result block
+             link = result.find("a", href=True)
+
+             # If a link is found, extract the URL and process the webpage
+             if link:
+                 link = link["href"]
+                 try:
+                     # Send a GET request to the link URL
+                     webpage = session.get(
+                         link,
+                         headers={
+                             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"
+                         },
+                     )
+
+                     # Raise an exception if the response status code is not successful
+                     webpage.raise_for_status()
+
+                     # Extract the visible text from the webpage
+                     visible_text = extract_text_from_webpage(webpage.text)
+
+                     # Truncate the visible text if it exceeds the maximum number of characters per page
+                     if len(visible_text) > max_chars_per_page:
+                         visible_text = visible_text[:max_chars_per_page]
+
+                     # Append the link and visible text to the search results list
+                     all_results.append({"link": link, "text": visible_text})
+                 except requests.exceptions.RequestException as e:
+                     # Print an error message if there is an error fetching or processing the link
+                     print(f"Error fetching or processing {link}: {e}")
+                     pass
+             else:
+                 pass
+
+     # Return the search results
+     return all_results
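# Example (hypothetical query, output shape as built above):
# search_with_google("Ghost 8B Beta", num_results=3, language="en")
# -> [{"link": "https://...", "text": "first ~4096 visible characters"}, ...]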
+
+
+ @lru_cache(maxsize=128)
+ def extract_text_from_image(file: str) -> str:
+     """
+     Extracts text from an image file.
+
+     @args:
+         file (str): The path or URL of the image file.
+
+     @returns:
+         str: The extracted text from the image.
+
+     @remarks:
+         - This function uses an LRU cache to store previously processed images for faster retrieval.
+         - The image file can be either a local file path or a URL.
+         - The function opens the image file using the PIL library.
+         - The function processes the image using an image processor.
+         - The processed image is then passed to a text generation model to generate text.
+         - The generated text is post-processed to obtain the final extracted text.
+     """
+     # Define the task and load the image
+     task = "<MORE_DETAILED_CAPTION>"
+     image = Image.open(
+         requests.get(file, stream=True).raw
+         if file.startswith("http")
+         else open(file, "rb")
+     )
+
+     if image.mode != "RGB":
+         image = image.convert("RGB")
+
+     # Preprocess the image using the image processor
+     inputs = image_processor(text=task, images=image, return_tensors="pt").to(
+         image_device, image_torch_dtype
+     )
+
+     # Generate text based on the input image
+     generated_ids = image_model.generate(
+         input_ids=inputs["input_ids"],
+         pixel_values=inputs["pixel_values"],
+         max_new_tokens=1024,
+         num_beams=3,
+         do_sample=False,
+     )
+
+     # Decode the generated text and post-process the answer
+     generated_text = image_processor.batch_decode(
+         generated_ids, skip_special_tokens=False
+     )[0]
+     parsed_answer = image_processor.post_process_generation(
+         generated_text,
+         task=task,
+         image_size=(image.width, image.height),
+     )
+
+     # Return the parsed answer for the specified task
+     return parsed_answer[task]
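# Example (hypothetical file): extract_text_from_image("cat.png") returns the
# detailed caption Florence-2 generates for the <MORE_DETAILED_CAPTION> task,
# e.g. "A gray cat sitting on a wooden table ...".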
+
+
+ @spaces.GPU(duration=90)
+ def generate_chat(
+     uuid: str,
+     message: dict,
+     chat_history: list[tuple[str, str]],
+     allow_used_tools: bool = True,
+     system_prompt: str = "",
+     max_new_tokens: int = 1536,
+     temperature: float = 0.4,
+     top_p: float = 0.95,
+     top_k: int = 50,
+     repetition_penalty: float = 1.0,
+     client_info: str = None,
+ ) -> Iterator[str]:
+     # Build the input_ids for the chat conversation
+     def build_input_ids(
+         system_prompt: str = "",
+         apply_tools: bool = None,
+         references=None,
+     ):
+         conversation = []
+
+         # Add the system prompt to the conversation
+         if system_prompt:
+             if system_prompt.strip() == DEFAULT_SYSTEM_PROMPT.strip():
+                 system_prompt = system_prompt.strip() + "\n\n" + client_info + "\n"
+             conversation.append({"role": "system", "content": system_prompt})
+
+         # Add the tools role to the conversation if apply_tools is True
+         if apply_tools is True:
+             conversation.append({"role": "tools", "content": supported_tools})
+
+         # Add the references role to the conversation
+         # if references is None:
+         #     references = [client_info]
+         # else:
+         #     references.insert(0, client_info)
+
+         if (
+             references is not None
+             and isinstance(references, list)
+             and len(references) > 0
+         ):
+             formatted_references = "Analyze the provided references, extract relevant information to provide accurate and objective feedback. This reference information may include: conversation context, assistant or user memories, reasoning guides, problem-solving suggestions, assistant rules, etc.\nIf the reference is not relevant, ignore it. Try to have a balanced approach, avoiding over-reliance on the documentation."
+             formatted_references += "\n\n" + ("\n\n".join(references))
+             conversation.append(
+                 {
+                     "role": "refs",
+                     "content": formatted_references,
+                 }
+             )
+
+         # Add the chat history to the conversation
+         for user, assistant in chat_history:
+             conversation.extend(
+                 [
+                     {"role": "user", "content": user},
+                     {"role": "assistant", "content": assistant},
+                 ]
+             )
+
+         # Add the user message with image attachments to the conversation
+         conversation.append(
+             {
+                 "role": "user",
+                 "content": (
+                     f"{' & '.join(message['attachments'])}\n\n{message['text']}"
+                     if "attachments" in message and len(message["attachments"]) > 0
+                     else f"{message['text']}"
+                 ),
+             }
+         )
+
+         logger.info(f"UUID: {uuid} - Conversation: {conversation}")
+
+         # Apply the chat template to convert the conversation into input_ids
+         input_ids = chat_tokenizer.apply_chat_template(
+             conversation, add_generation_prompt=True, return_tensors="pt"
+         )
+         input_ids = input_ids.to(chat_model.device)
+
+         # Trim the input_ids if it exceeds the maximum token length
+         if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+             input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+             gr.Warning(
+                 f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens."
+             )
+         return input_ids
+
+     # Generate chat responses based on the input_ids
+     def generate_chat_responses(
+         previous_response: str = None,
+     ):
+         document_references = []
+
+         # Check if the previous response contains scheduled tool runs
+         if previous_response is not None:
+             scheduled_tools_runs = None
+             try:
+                 scheduled_tools_runs = json.loads(previous_response)
+                 if scheduled_tools_runs["type"] == "function" and scheduled_tools_runs[
+                     "name"
+                 ] in ["search_on_internet"]:
+                     pass
+                 else:
+                     scheduled_tools_runs = None
+             except Exception as e:
+                 print(e)
+                 pass
+
+             # If scheduled tool runs exist, perform the corresponding searches
+             if (
+                 scheduled_tools_runs is not None
+                 and scheduled_tools_runs["name"] == "search_on_internet"
+             ):
+                 keyword = scheduled_tools_runs["arguments"]["keyword"]
+                 search_type = scheduled_tools_runs["arguments"]["type"]
+                 language = scheduled_tools_runs["arguments"]["language"]
+
+                 # Search on Wikipedia if the search type is "wikipedia"
+                 if search_type == "wikipedia":
+                     gr.Info(
+                         "Searching for information on Wikipedia.",
+                         duration=5,
+                         visible=True,
+                     )
+                     document_references.extend(
+                         search_with_wikipedia(query=keyword, language=language)
+                     )
+
+                 # Search on Google
+                 gr.Info("Searching for information on Google.")
+                 document_references.extend(
+                     search_with_google(
+                         query=keyword,
+                         language=language,
+                         num_results=3,
+                     )
+                 )
+                 print("document_references:", document_references)
+
+         # Determine if tools should be applied based on the allow_used_tools flag
+         apply_tools = (
+             True if allow_used_tools is True and previous_response is None else False
+         )
+
+         # Build the input_ids for the chat conversation
+         input_ids = build_input_ids(
+             system_prompt=system_prompt,
+             apply_tools=apply_tools,
+             references=document_references,
+         )
+
+         # Create a TextIteratorStreamer to generate chat responses
+         streamer = TextIteratorStreamer(
+             chat_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+         )
+
+         # Set the generation parameters
+         generate_kwargs = dict(
+             input_ids=input_ids,
+             streamer=streamer,
+             max_new_tokens=max_new_tokens,
+             do_sample=True,
+             repetition_penalty=repetition_penalty,
+         )
+         if temperature == 0:
+             generate_kwargs["do_sample"] = False
+         else:
+             generate_kwargs["temperature"] = temperature
+             generate_kwargs["top_p"] = top_p
+             generate_kwargs["top_k"] = top_k
+
+         # Start the generation process in a separate thread
+         t = Thread(target=chat_model.generate, kwargs=generate_kwargs)
+         t.start()
+
+         logger.info(
+             f"UUID: {uuid} - Is apply tools: {apply_tools} - Is apply documents: {len(document_references) > 0} - Is previous response: {previous_response is not None} - Start generating chat responses"
+         )
+
+         state = {
+             "mark": None,
+             "respond": False,
+         }
+         outputs = []
+         for text in streamer:
+             if state["mark"] is None:
+                 state["mark"] = time.time()
+             outputs.append(text)
+             if (
+                 apply_tools is False
+                 or state["mark"] + waiting_tools_timeout < time.time()
+             ):
+                 state["respond"] = True
+                 yield "".join(outputs)
+
+         # If tools are applied and no response is generated within the timeout, continue generating chat responses
+         if (
+             apply_tools is True
+             and state["respond"] is False
+             and state["mark"] + waiting_tools_timeout > time.time()
+         ):
+             previous_response = "".join(outputs)
+             yield from generate_chat_responses(previous_response=previous_response)
+
+     # Yield the generated chat responses
+     yield from generate_chat_responses(previous_response=None)
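# Note on the control flow above: when tools are enabled, the first pass holds back
# output for waiting_tools_timeout (5) seconds. If generation finishes inside that
# window with nothing streamed, the buffered text is treated as a tool call, parsed
# as JSON, the requested search runs, and a second pass regenerates the answer with
# the results attached as "refs"; otherwise tokens stream to the user as usual.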
+
+
+ def generate(
+     message: dict,
+     chat_history: list[tuple[str, str]],
+     allow_used_tools: bool = True,
+     system_prompt: str = "",
+     max_new_tokens: int = 1536,
+     temperature: float = 0.4,
+     top_p: float = 0.95,
+     top_k: int = 50,
+     repetition_penalty: float = 1.0,
+     client_info: str = None,
+ ) -> Iterator[str]:
+     # Generate a unique identifier from the current time
+     uuid = zlib.crc32(str.encode(str(time.time())))
+     logger.info(f"UUID: {uuid} - Starting image text extraction process")
+
+     # Limit the number of files to process to 2
+     if len(message["files"]) > 2:
+         gr.Warning("Only the first 2 images will be processed.")
+
+     message["files"] = message["files"][:2]
+
+     # Extract text from each image file and replace the file path with an attachment tag containing the extracted text
+     message["attachments"] = handle_file_extraction(
+         files=list(message["files"]), uuid=uuid
+     )
+     logger.info(f"UUID: {uuid} - Image text extraction process completed")
+
+     logger.info(f"UUID: {uuid} - Previous chat history: {chat_history}")
+     for idx, chat_pair in enumerate(chat_history):
+         user_message, assistant_message = chat_pair
+         if not isinstance(user_message, str) and assistant_message is None:
+             text_descriptions = handle_file_extraction(
+                 files=list(user_message), uuid=uuid
+             )
+             chat_input = (
+                 f"{' & '.join(text_descriptions)}\n\n{chat_history[idx + 1][0]}"
+             )
+             chat_history[idx + 1][0] = chat_input
+             chat_history[idx] = [None, None]
+             logger.info(
+                 f"UUID: {uuid} - Updated chat history: {chat_history} - Updated chat input: {chat_input}"
+             )
+
+     chat_history = list(
+         filter(lambda x: x[0] is not None and x[1] is not None, chat_history)
+     )
+     logger.info(f"UUID: {uuid} - Filtered chat history: {chat_history}")
+
+     yield from generate_chat(
+         uuid=uuid,
+         message=message,
+         chat_history=chat_history,
+         allow_used_tools=allow_used_tools,
+         system_prompt=system_prompt,
+         max_new_tokens=max_new_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         repetition_penalty=repetition_penalty,
+         client_info=client_info,
+     )
+
+
+ def handle_file_extraction(files: list[str], uuid: str):
+     """
+     Extracts text from images in the given files and returns a list of attachments.
+
+     @args:
+         files (list[str]): The image files to extract text from.
+         uuid (str): The UUID associated with the extraction process.
+
+     @returns:
+         list: A list of attachments, each represented as a string.
+
+     @remarks:
+         - This function iterates over the given files and extracts text from each image file.
+         - The extracted text is logged along with the UUID and file information.
+         - The extracted text is then added to the attachments list as a string representation of an attachment.
+         - The attachments list is returned at the end of the function.
+     """
+
+     attachments = []
+     for idx, file_to_extract in enumerate(files):
+         extracted_text = extract_text_from_image(file=file_to_extract)
+         logger.info(
+             f"UUID: {uuid} - File: {file_to_extract} - Extracted text: {extracted_text}"
+         )
+         attachments.append(
+             f'<attachment index="{idx}" type="image" description="{extracted_text}" />'
+         )
+     return attachments
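# Example (hypothetical file): handle_file_extraction(files=["cat.png"], uuid=123)
# -> ['<attachment index="0" type="image" description="A gray cat sitting ..." />']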
+
+
+ chatbot = gr.Chatbot(
+     height=500,
+     placeholder=PLACEHOLDER,
+     label="Ghost 8B Beta (β, 8k)",
+     show_copy_button=True,
+ )
+
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     chatbot=chatbot,
+     fill_height=True,
+     multimodal=True,
+     textbox=gr.MultimodalTextbox(
+         file_types=["image"],
+         placeholder="Type a message...",
+     ),
+     additional_inputs=[
+         gr.Checkbox(
+             label="Allow tool use (available: internet search)",
+             value=False,
+         ),
+         gr.Textbox(label="System prompt", lines=6, value=DEFAULT_SYSTEM_PROMPT),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.0,
+             maximum=2.0,
+             step=0.1,
+             value=0.4,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.95,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=100,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.0,
+         ),
+         gr.Textbox(
+             elem_id="client_info",
+             label="Client info",
+             lines=1,
+             value="The current time is {}".format(
+                 time.strftime("%A, %d %B %Y %H:%M:%S")
+             ),
+             visible=False,
+         ),
+     ],
+     additional_inputs_accordion=gr.Accordion(label="Additional Inputs", open=True),
+     stop_btn="Stop",
+     cache_examples=False,
+     examples=[],
+     examples_per_page=10,
+     concurrency_limit=100,
+ )
+
+ with gr.Blocks(fill_height=True, css="style.css", head=HEAD) as demo:
+     gr.Markdown(DESCRIPTION)
+     chat_interface.render()
+     gr.Markdown(LICENSE)
+
+ if __name__ == "__main__":
+     demo.queue().launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ accelerate
+ bitsandbytes
+ gradio
+ spaces
+ transformers
+ timm
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ torch==2.0.0
+ beautifulsoup4>=4.9
+ wikipedia==1.4.0
style.css ADDED
@@ -0,0 +1,24 @@
+ h1 {
+   text-align: center;
+   display: block;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+
+ .contain {
+   max-width: 900px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }
+
+ .s-pad {
+   display: block;
+   padding-top: 2rem;
+   height: 1px;
+   width: 100%;
+ }