Werli committed (verified)
Commit 36e1bda · 1 Parent(s): d63e2b7

Removed the Llama3 loader: it is slow, uses a lot of RAM/VRAM, and nobody was using it. Added [gokaygokay/Lamini-Prompt-Enchance-Long](https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance-Long) instead.
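For reference, a minimal sketch of how the newly added prompt-enhance model could be queried on its own through the transformers pipeline API. This is an illustration under assumptions (that the model is a seq2seq/text2text checkpoint, and the example prompt and generation parameters are invented), not the app's code; the app wraps the model in `reorganizer_class` from `modules.reorganizer_model`, as the diff below shows.

```python
# Rough illustration only (assumed usage), not the repo's actual wrapper.
from transformers import pipeline

# Assumption: gokaygokay/Lamini-Prompt-Enchance-Long is a seq2seq model,
# so the text2text-generation pipeline applies.
enhance = pipeline(
    "text2text-generation",
    model="gokaygokay/Lamini-Prompt-Enchance-Long",
)

tags = "1girl, solo, long hair, school uniform, cherry blossoms"  # example tag string
out = enhance(tags, max_length=256)[0]["generated_text"]
print(out)  # a longer natural-language description expanded from the tags
```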

Files changed (1): app.py (+23, −23)
app.py CHANGED
@@ -10,7 +10,7 @@ from apscheduler.schedulers.background import BackgroundScheduler
 import json
 from modules.classifyTags import classify_tags,process_tags
 from modules.florence2 import process_image,single_task_list,update_task_dropdown
-from modules.llama_loader import llama_list,llama3reorganize
+from modules.reorganizer_model import reorganizer_list,reorganizer_class
 from modules.tag_enhancer import prompt_enhancer
 os.environ['PYTORCH_ENABLE_MPS_FALLBACK']='1'
 
@@ -147,7 +147,7 @@ class Predictor:
 character_thresh,
 character_mcut_enabled,
 characters_merge_enabled,
-llama3_reorganize_model_repo,
+reorganizer_model_repo,
 additional_tags_prepend,
 additional_tags_append,
 tag_results,
@@ -160,7 +160,7 @@ class Predictor:
 print(f"Predict load model: {model_repo}, gallery length: {gallery_len}")
 
 timer = Timer() # Create a timer
-progressRatio = 0.5 if llama3_reorganize_model_repo else 1
+progressRatio = 0.5 if reorganizer_model_repo else 1
 progressTotal = gallery_len + 1
 current_progress = 0
 
@@ -181,12 +181,12 @@ class Predictor:
 character_res = None
 general_res = None
 
-if llama3_reorganize_model_repo:
-    print(f"Llama3 reorganize load model {llama3_reorganize_model_repo}")
-    llama3_reorganize = llama3reorganize(llama3_reorganize_model_repo, loadModel=True)
+if reorganizer_model_repo:
+    print(f"Reorganizer load model {reorganizer_model_repo}")
+    reorganizer = reorganizer_class(reorganizer_model_repo, loadModel=True)
     current_progress += progressRatio/progressTotal;
-    progress(current_progress, desc="Initialize llama3 model finished")
-    timer.checkpoint(f"Initialize llama3 model")
+    progress(current_progress, desc="Initialize reorganizer model finished")
+    timer.checkpoint(f"Initialize reorganizer model")
 
 timer.report()
 
@@ -290,17 +290,17 @@ class Predictor:
 progress(current_progress, desc=f"image{idx:02d}, predict finished")
 timer.checkpoint(f"image{idx:02d}, predict finished")
 
-if llama3_reorganize_model_repo:
-    print(f"Starting reorganize with llama3...")
-    reorganize_strings = llama_loader.llama3_reorganize.reorganize(sorted_general_strings)
+if reorganizer_model_repo:
+    print(f"Starting reorganizer...")
+    reorganize_strings = reorganizer.reorganize(sorted_general_strings)
     reorganize_strings = re.sub(r" *Title: *", "", reorganize_strings)
     reorganize_strings = re.sub(r"\n+", ",", reorganize_strings)
     reorganize_strings = re.sub(r",,+", ",", reorganize_strings)
-    sorted_general_strings += "," + reorganize_strings
+    sorted_general_strings += ",\n\n" + reorganize_strings
 
     current_progress += progressRatio/progressTotal;
-    progress(current_progress, desc=f"image{idx:02d}, llama3 reorganize finished")
-    timer.checkpoint(f"image{idx:02d}, llama3 reorganize finished")
+    progress(current_progress, desc=f"image{idx:02d}, reorganizer finished")
+    timer.checkpoint(f"image{idx:02d}, reorganizer finished")
 
 txt_file = self.create_file(sorted_general_strings, output_dir, image_name + ".txt")
 txt_infos.append({"path":txt_file, "name": image_name + ".txt"})
@@ -331,9 +331,9 @@ class Predictor:
 taggers_zip.write(info["path"], arcname=info["name"])
 download.append(downloadZipPath)
 # End zip creation logic
-if llama3_reorganize_model_repo:
-    llama_loader.llama3_reorganize.release_vram()
-    del llama3_reorganize
+if reorganizer_model_repo:
+    reorganizer.release_vram()
+    del reorganizer
 
 progress(1, desc=f"Predict completed")
 timer.report_all() # Print all recorded times
@@ -460,11 +460,11 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
 scale=1,
 )
 with gr.Row():
-    llama3_reorganize_model_repo = gr.Dropdown(
-        [None] + llama_list,
+    reorganizer_model_repo = gr.Dropdown(
+        [None] + reorganizer_list,
         value=None,
-        label="Llama3 Model",
-        info="Use the Llama3 model to reorganize the article (Note: very slow)",
+        label="Reorganizer Model",
+        info="Use a model to create a description for you",
     )
 with gr.Row():
 additional_tags_prepend = gr.Text(label="Prepend Additional tags (comma split)")
@@ -479,7 +479,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
 character_thresh,
 character_mcut_enabled,
 characters_merge_enabled,
-llama3_reorganize_model_repo,
+reorganizer_model_repo,
 additional_tags_prepend,
 additional_tags_append,
 ],
@@ -534,7 +534,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
 character_thresh,
 character_mcut_enabled,
 characters_merge_enabled,
-llama3_reorganize_model_repo,
+reorganizer_model_repo,
 additional_tags_prepend,
 additional_tags_append,
 tag_results,