Upload 2 files
app.py
CHANGED
@@ -1,211 +1,161 @@
 import json
 import subprocess
 from llama_cpp import Llama
 from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
 from llama_cpp_agent.providers import LlamaCppPythonProvider
-from huggingface_hub import hf_hub_download
 import gradio as gr
-import logging
-import spaces
-import time
-import torch
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-repo_id = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"
-filename = "Meta-Llama-3-8B-Instruct.Q8_0.gguf"
-
-try:
-    start_time = time.time()
-    logger.info("Downloading Model....")
-    hf_hub_download(
-        repo_id = repo_id ,
-        filename = filename,
-        local_dir="./model"
-    )
-    end_time = time.time()
-    logger.info(f"Download complete. Time taken : {start_time - end_time} seconds.")
-
-except Exception as e:
-    logger.error(f"Unable to download Model : {e}")
-    raise
-
-def chunk_text(text, chunk_size=5000):
-    """
-    Splits the input text into chunks of specified size.

-    Returns:
-        list: A list of text chunks.
-    """
-    words = text.split()
-    chunks = [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
-    return chunks

-# def combine_responses(responses):
-#     """
-#     Combines the responses from all chunks into a final output string.

-#     Returns:
-#         str: The combined output string.
-#     """
-#     combined_output = " ".join(responses)
-#     return combined_output

 llm = None

-@spaces.GPU(duration = 110)
-def chat_gguf(message : str, history : list, temperature : float, max_new_tokens : int):
-    """
-    Generate a streaming response using the llama3-8b model with chunking.
-
-    Args:
-        message (str): The input message.
-        history (list): The conversation history used by ChatInterface. - Not used.
-        temperature (float): The temperature for generating the response.
-        max_new_tokens (int): The maximum number of new tokens to generate.
-
-    Returns:
-        str: The generated response.
-    """
     global llm
-
-    try:
-        '''Load the Model'''
-
-        start_time = time.time()
-        logger.info("Loading model...")
-        model = Llama(
-            model_path = f"./model/{filename}",
-            flash_attn = True,
-            n_gpu_layers = -1, #all the layers are offloaded,
-            n_batch = 512, #prompt processing batch size (higher the value consumes more hardware)
-            n_ctx = 8192, #max tokens (input + output)
-            last_n_tokens = 0, #Context from last N tokens
-        )
-        llm = model
-        end_time = time.time()
-        logger.info(f"Model loaded. Time taken : {start_time - end_time} seconds.")
-        logger.info(f"Device : {torch.cuda.is_available()}")
-
-    except Exception as e:
-        logger.error(f"Model cannot be loaded : {e}")
-        raise
-
-    start_time = time.time()
-    logger.info("Loading provider...")
-    provider = LlamaCppPythonProvider(model)
-
-    settings = provider.get_provider_default_settings()

-    SYS_PROMPT = '''
-    Extract all relevant keywords and add quantity from the following text and format the result in nested JSON, ignoring personal details and focusing only on the scope of work as shown in the example:
-    Good JSON example: {'lobby': {'frcm': {'replace': {'carpet': 1, 'carpet_pad': 1, 'base': 1, 'window_treatments': 1, 'artwork_and_decorative_accessories': 1, 'portable_lighting': 1, 'upholstered_furniture_and_decorative_pillows': 1, 'millwork': 1} } } }
-    Bad JSON example: {'lobby': { 'frcm': { 'replace': [ 'carpet', 'carpet_pad', 'base', 'window_treatments', 'artwork_and_decorative_accessories', 'portable_lighting', 'upholstered_furniture_and_decorative_pillows', 'millwork'] } } }
-    Make sure to fetch details from the provided text and ignore unnecessary information. The response should be in JSON format only, without any additional comments.
-    '''
-    end_time = time.time()
-    logger.info(f"Provider settings updated. Prompt loaded. Time taken : {start_time - end_time} seconds.")

-    start_time = time.time()
-    logger.info("Loading agent...")
-    # Initialize the agent
     agent = LlamaCppAgent(
         provider,
-        system_prompt=SYS_PROMPT,
         predefined_messages_formatter_type=chat_template,
-        debug_output=
     )

-    logger.info("Generating responses...")
-    for chunk in chunks:
-        response = agent.get_chat_response(
-            chunk,
-            llm_sampling_settings=settings,
-            returns_streaming_generator = True, #generate streamer
-            print_output = False
-        )
-
-        responses.append(response)
-    logger.info(f"Responses generated. Time taken : {time.time() - start_time} seconds.")
-
-    output = ""
-    for response in responses:
-        for text in response:
-            output += text
-
-            yield output
-
-DESCRIPTION = '''
-<div>
-<h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
-</div>
-'''
-
-LICENSE = """
-<p/>
----
-For more information, visit our [website](https://contentease.ai).
-"""
-
-PLACEHOLDER = """
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
-<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
-</div>
-"""

 """
-# Gradio block
-chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

 )
-
-    gr.Markdown(LICENSE)

 if __name__ == "__main__":
-
-    demo.launch(show_error=True, debug = True)
-    except Exception as e:
-        logger.error(f"Error launching Gradio demo: {e}")
+import spaces
 import json
 import subprocess
 from llama_cpp import Llama
 from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
 from llama_cpp_agent.providers import LlamaCppPythonProvider
+from llama_cpp_agent.chat_history import BasicChatHistory
+from llama_cpp_agent.chat_history.messages import Roles
 import gradio as gr
+from huggingface_hub import hf_hub_download

+hf_hub_download(
+    repo_id="bartowski/gemma-2-9b-it-GGUF",
+    filename="gemma-2-9b-it-Q5_K_M.gguf",
+    local_dir="./models"
+)

+hf_hub_download(
+    repo_id="bartowski/gemma-2-27b-it-GGUF",
+    filename="gemma-2-27b-it-Q5_K_M.gguf",
+    local_dir="./models"
+)

 llm = None
+llm_model = None
+
+@spaces.GPU(duration=120)
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    model,
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+    top_k,
+    repeat_penalty,
+):
+    chat_template = MessagesFormatterType.GEMMA_2

     global llm
+    global llm_model

+    if llm is None or llm_model != model:
+        llm = Llama(
+            model_path=f"models/{model}",
+            flash_attn=True,
+            n_gpu_layers=81,
+            n_batch=1024,
+            n_ctx=8192,
+        )
+        llm_model = model

+    provider = LlamaCppPythonProvider(llm)

     agent = LlamaCppAgent(
         provider,
+        system_prompt=f"{system_message}",
         predefined_messages_formatter_type=chat_template,
+        debug_output=True
     )
+
+    settings = provider.get_provider_default_settings()
+    settings.temperature = temperature
+    settings.top_k = top_k
+    settings.top_p = top_p
+    settings.max_tokens = max_tokens
+    settings.repeat_penalty = repeat_penalty
+    settings.stream = True

+    messages = BasicChatHistory()
+
+    for msn in history:
+        user = {
+            'role': Roles.user,
+            'content': msn[0]
+        }
+        assistant = {
+            'role': Roles.assistant,
+            'content': msn[1]
+        }
+        messages.add_message(user)
+        messages.add_message(assistant)
+
+    stream = agent.get_chat_response(
+        message,
+        llm_sampling_settings=settings,
+        chat_history=messages,
+        returns_streaming_generator=True,
+        print_output=False
+    )
+
+    outputs = ""
+    for output in stream:
+        outputs += output
+        yield outputs
+
+description = """<p align="center"><a href="https://huggingface.co/spaces/gokaygokay/Gemma-2-llamacpp" target="_blank">[Reference Space]</a></p>
+<p><center>
+<p align="center">Defaults to 9B it (you can switch to other from additional inputs)</p>
+<p><center>
+<a href="https://huggingface.co/google/gemma-2-27b-it" target="_blank">[27B it Model]</a>
+<a href="https://huggingface.co/google/gemma-2-9b-it" target="_blank">[9B it Model]</a>
+<a href="https://huggingface.co/bartowski/gemma-2-27b-it-GGUF" target="_blank">[27B it Model GGUF]</a>
+<a href="https://huggingface.co/bartowski/gemma-2-9b-it-GGUF" target="_blank">[9B it Model GGUF]</a>
+</center></p>
 """

+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Dropdown([
+                'gemma-2-9b-it-Q5_K_M.gguf',
+                'gemma-2-27b-it-Q5_K_M.gguf'
+            ],
+            value="gemma-2-9b-it-Q5_K_M.gguf",
+            label="Model"
+        ),
+        gr.Textbox(value="You are a helpful assistant.", label="System message"),
+        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p",
+        ),
+        gr.Slider(
+            minimum=0,
+            maximum=100,
+            value=40,
+            step=1,
+            label="Top-k",
+        ),
+        gr.Slider(
+            minimum=0.0,
+            maximum=2.0,
+            value=1.1,
+            step=0.1,
+            label="Repetition penalty",
+        ),
+    ],
+    retry_btn="Retry",
+    undo_btn="Undo",
+    clear_btn="Clear",
+    submit_btn="Send",
+    title="Chat with Gemma 2 using llama.cpp",
+    description=description,
+    chatbot=gr.Chatbot(
+        scale=1,
+        likeable=False,
+        show_copy_button=True
 )
+)

 if __name__ == "__main__":
+    demo.launch()
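Because the new respond() is an ordinary Python generator, it can be smoke-tested outside Gradio once one of the GGUF files above has been downloaded into ./models. The call below is a sketch, not part of the commit: the prompt and sampling values are illustrative, it assumes llama-cpp-python was built with GPU support (n_gpu_layers=81 offloads the whole model), and outside a ZeroGPU Space the @spaces.GPU decorator should simply pass the call through.

final_text = ""
for partial in respond(
    "Give me three facts about llamas.",  # message
    [],                                   # history
    "gemma-2-9b-it-Q5_K_M.gguf",          # model file under ./models
    "You are a helpful assistant.",       # system_message
    256,                                  # max_tokens
    0.7,                                  # temperature
    0.95,                                 # top_p
    40,                                   # top_k
    1.1,                                  # repeat_penalty
):
    final_text = partial  # each yield is the full response generated so far
print(final_text)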