Commit: update
app.py CHANGED
@@ -188,12 +188,37 @@ def get_llm(cfg):
 
 
 def run(text, intensity):
-    # [old lines 191-194: removed content not captured in the page rendering]
+    # Configure logging
+    logging.basicConfig(level=logging.INFO,
+                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger(__name__)
+    sample_outputs = "start: "
+
+    try:
+        # Log the start of the process
+        logger.info("Starting the process with config file: %s", config_path)
+
+        # Load configuration from the file
+        config = load_config(config_path)
+
+        # Load necessary components
+        prompt_template = get_prompt_template()
+
+        # Replace OpenAI embed model and llm with custom ones
+        reset_settings(config)
+
+        # Get retriever
+        retriever = get_retriever(config, prompt_template)
+
+        # Load tokenizer and language model
+        tokenizer = load_tokenizer(config)
+        language_model = get_llm(config)
+        prompt = retriever.query(text).response
+        prompt = tokenizer.bos_token + '[INST] ' + prompt + ' [/INST]'
+        streamer = TextStreamer(tokenizer, skip_prompt=True)
+        input_ids = tokenizer([prompt], return_tensors='pt').to(cfg.environment.device)
 
-    # [old line 196: removed content not captured in the page rendering]
+        sample_outputs = language_model.generate(
             **input_ids,
             streamer=streamer,
             pad_token_id=tokenizer.pad_token_id,
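Note: the added body of run() loads config = load_config(config_path), but it also reads config_path, cfg.environment.device and, in the next hunk, cfg.generation.temperature. Unless config_path and cfg already exist at module scope, those references raise NameError, so cfg here looks like a typo for config. Also, logging.basicConfig() is re-run on every request; that is harmless (it is a no-op after the first call) but belongs at module level.

For reference, here is a minimal self-contained sketch of the same prompt-plus-TextStreamer generation pattern using plain transformers. The checkpoint name is an assumption (a stand-in, not necessarily the Space's model), and the repo-specific helpers from the diff (load_config, get_retriever, reset_settings, ...) are deliberately omitted:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

MODEL_NAME = "Viet-Mistral/Vistral-7B-Chat"  # assumption: stand-in checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)

# Wrap the (retriever-augmented) prompt in the Mistral-style [INST] template,
# mirroring tokenizer.bos_token + '[INST] ' + prompt + ' [/INST]' in the diff.
prompt = tokenizer.bos_token + "[INST] Con đường xưa em đi [/INST]"

# TextStreamer prints tokens to stdout as they are generated;
# skip_prompt=True keeps the prompt itself from being echoed.
streamer = TextStreamer(tokenizer, skip_prompt=True)

inputs = tokenizer([prompt], return_tensors="pt").to(device)
output_ids = model.generate(
    **inputs,
    streamer=streamer,
    # Fall back to eos when the tokenizer defines no pad token.
    pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))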
@@ -202,19 +227,33 @@ def run(text, intensity):
             temperature=cfg.generation.temperature
         )
 
-    # [old line 205: removed content not captured in the page rendering]
+        # Start the command line interface
+        # vistral_chat(config, retriever, tokenizer, language_model)
+
+        # Log successful completion
+        logger.info("Process completed successfully.")
+
+    except FileNotFoundError as e:
+        logger.error("Configuration file not found: %s", e)
 
+    except Exception as e:
+        logger.exception("An error occurred: %s", e)
 
 
-    res="Chatbot Data Mining 2024 \n \n \n"
-    max_length=intensity
+
+    # print(20*'---')
+
+    # res="Chatbot Data Mining 2024 \n \n \n"
+    # max_length=intensity
+
+    return sample_outputs
 
 
-def vistral_chat(
+def vistral_chat():
     demo = gr.Interface(fn=run,
-                        inputs=[gr.Textbox(label="Nhập vào nội dung input",value="Con đường xưa em đi"),gr.Slider(label="Độ dài output muốn tạo ra", value=20, minimum=10, maximum=100, step=2)],
+                        inputs=[gr.Textbox(label="Nhập vào nội dung input",value="Con đường xưa em đi"),gr.Slider(label="Độ dài output muốn tạo ra", value=20, minimum=10, maximum=100, step=2),],
                         outputs=gr.Textbox(label="Output"), # <-- Number of output components: 1
     )
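Note: after this change run() always returns sample_outputs, but on any exception that is still the placeholder string "start: ", and on success it is the raw tensor returned by generate(), which Gradio renders as a tensor repr rather than text. The intensity slider is also dead now that max_length=intensity is commented out. A hedged sketch of a helper that addresses all three (generate_reply is a hypothetical name; the model and tokenizer objects come from the loaders shown above):

import logging

logger = logging.getLogger(__name__)

def generate_reply(language_model, tokenizer, input_ids, intensity, temperature):
    # Hypothetical helper: generate, decode, and surface failures as text.
    try:
        output_ids = language_model.generate(
            **input_ids,
            pad_token_id=tokenizer.pad_token_id,
            max_new_tokens=int(intensity),  # wire the slider back in
            do_sample=True,
            temperature=temperature,
        )
        # generate() returns token ids; decode so the UI shows text,
        # not a tensor repr.
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)
    except FileNotFoundError as e:
        logger.error("Configuration file not found: %s", e)
        return f"Configuration file not found: {e}"
    except Exception as e:  # keep the UI alive on unexpected failures
        logger.exception("An error occurred: %s", e)
        return f"Error: {e}"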
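For clarity, here is the Gradio wiring with the Vietnamese labels translated ("Nhập vào nội dung input" means "Enter the input content"; "Độ dài output muốn tạo ra" means "Desired output length"). The diff builds demo but never calls demo.launch() in the lines shown; the sketch below assumes vistral_chat() is meant to launch it:

import gradio as gr

def run(text, intensity):
    # Stand-in for the Space's run(); echoes its arguments.
    return f"{text} (requested length: {int(intensity)})"

def vistral_chat():
    demo = gr.Interface(
        fn=run,
        inputs=[
            gr.Textbox(label="Enter the input content",
                       value="Con đường xưa em đi"),
            gr.Slider(label="Desired output length",
                      value=20, minimum=10, maximum=100, step=2),
        ],
        outputs=gr.Textbox(label="Output"),  # one output component
    )
    demo.launch()  # assumption: launch call not shown in the diff

if __name__ == "__main__":
    vistral_chat()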