Fix
Changed `categorized_output` to `final_categorized_output`.
I realized that `categorized_output_string` was being accumulated for every image in the `categorized_output_strings` list, and a final concatenated string was then created from that list, so the tags from all images ended up merged together.
I fixed this by dropping that accumulation as the source of the per-image output: instead of collapsing all `categorized_output_strings` into a single list, the `categorized_output_string` for each image is now stored separately in the `tag_results` dictionary. I also updated `get_selection_from_gallery` so that it correctly retrieves the `categorized_output_string` for the selected image from the `tag_results` dictionary.
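In short, each image's categorized string now lives alongside its other results under that image's path, and the gallery-selection handler reads it back from there. Below is a minimal sketch of that pattern, simplified from the `predict` loop in the diff; `per_image_results` and `get_categorized_for` are illustrative stand-ins, not names from the app.

```python
# Minimal sketch of the per-image storage pattern (not the app's full predict loop).
# per_image_results is a hypothetical stand-in for the tags produced per gallery image.
per_image_results = {
    "/tmp/cat.png": {"animal": ["cat", "whiskers"], "setting": ["indoors"]},
    "/tmp/dog.png": {"animal": ["dog"], "setting": ["outdoors", "grass"]},
}

tag_results = {}                 # keyed by image path, one entry per image
categorized_output_strings = []  # only used to build the combined string

for image_path, classified_tags in per_image_results.items():
    # One categorized string per image...
    categorized_output_string = ", ".join(
        ", ".join(tags) for tags in classified_tags.values()
    )
    categorized_output_strings.append(categorized_output_string)
    # ...stored with that image's other results instead of being merged away.
    tag_results[image_path] = {"strings2": categorized_output_string}

# Combined string across all images, kept separate from the per-image entries.
final_categorized_output = ", ".join(categorized_output_strings)

def get_categorized_for(image_path: str) -> str:
    # What get_selection_from_gallery now does for the clicked image.
    return tag_results.get(image_path, {}).get("strings2", "")

print(get_categorized_for("/tmp/cat.png"))  # -> "cat, whiskers, indoors"
```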
app.py
CHANGED
@@ -75,7 +75,7 @@ Features:
 - Supports batch processing of multiple images.
 - Tags images with multiple categories: general tags, character tags, and ratings.
 - Displays categorized tags in a structured format.
-- Includes a separate tab for image captioning using Florence 2. This
+- Includes a separate tab for image captioning using Florence 2. This supports CUDA, MPS or CPU if one of them is available.
 - Supports various captioning tasks (e.g., Caption, Detailed Caption, Object Detection), as well it can display output text and images for tasks that generate visual outputs.
 
 Example image by [me.](https://huggingface.co/Werli)
@@ -448,176 +448,196 @@ class Predictor:
         additional_tags_append,
         tag_results,
         progress=gr.Progress()
-    ):
-        timer = Timer() # Create a timer
-        progressRatio = 0.5 if llama3_reorganize_model_repo else 1
-        progressTotal = gallery_len + 1
-        current_progress = 0
-
-        self.load_model(model_repo)
-        current_progress += progressRatio/progressTotal;
-        progress(current_progress, desc="Initialize wd model finished")
-        timer.checkpoint(f"Initialize wd model")
-
-        # Result
-        txt_infos = []
-        output_dir = tempfile.mkdtemp()
-        if not os.path.exists(output_dir):
-            os.makedirs(output_dir)
-
-        sorted_general_strings = ""
-        rating = None
-        character_res = None
-        general_res = None
-
-        if llama3_reorganize_model_repo:
-            print(f"Llama3 reorganize load model {llama3_reorganize_model_repo}")
-            llama3_reorganize = Llama3Reorganize(llama3_reorganize_model_repo, loadModel=True)
-            current_progress += progressRatio/progressTotal;
-            progress(current_progress, desc="Initialize llama3 model finished")
-            timer.checkpoint(f"Initialize llama3 model")
-
-        timer.report()
-
-        prepend_list = [tag.strip() for tag in additional_tags_prepend.split(",") if tag.strip()]
-        append_list = [tag.strip() for tag in additional_tags_append.split(",") if tag.strip()]
-        if prepend_list and append_list:
-            append_list = [item for item in append_list if item not in prepend_list]
-
-        # Dictionary to track counters for each filename
-        name_counters = defaultdict(int)
-        # New code to create categorized output string
-        categorized_output_strings = []
-        for idx, value in enumerate(gallery):
-            try:
-                image_path = value[0]
-                image_name = os.path.splitext(os.path.basename(image_path))[0]
-
-                # Increment the counter for the current name
-                name_counters[image_name] += 1
-
-                if name_counters[image_name] > 1:
-                    image_name = f"{image_name}_{name_counters[image_name]:02d}"
-                    general_probs = np.array([x[1] for x in general_names])
-                    general_thresh = mcut_threshold(general_probs)
-                character_names = [labels[i] for i in self.character_indexes]
-                    character_thresh = max(0.15, character_thresh)
-                character_list = list(character_res.keys())
-                    reverse=True,
-                )
-                sorted_general_list = [x[0] for x in sorted_general_list]
-                #Remove values from character_list that already exist in sorted_general_list
-                character_list = [item for item in character_list if item not in sorted_general_list]
-                #Remove values from sorted_general_list that already exist in prepend_list or append_list
-                if prepend_list:
-                    sorted_general_list = [item for item in sorted_general_list if item not in prepend_list]
-                if append_list:
-                    sorted_general_list = [item for item in sorted_general_list if item not in append_list]
-                categorized_output_strings.append(categorized_output_string)
-                if llama3_reorganize_model_repo:
-                    print(f"Starting reorganize with llama3...")
-                    reorganize_strings = llama3_reorganize.reorganize(sorted_general_strings)
-                    reorganize_strings = re.sub(r" *Title: *", "", reorganize_strings)
-                    reorganize_strings = re.sub(r"\n+", ",", reorganize_strings)
-                    reorganize_strings = re.sub(r",,+", ",", reorganize_strings)
-                    sorted_general_strings += "," + reorganize_strings
-
-                    current_progress += progressRatio/progressTotal;
-                    progress(current_progress, desc=f"image{idx:02d},
-                    timer.checkpoint(f"image{idx:02d},
-            llama3_reorganize.release_vram()
-            del llama3_reorganize
-
-        progress(1, desc=f"Predict completed")
-        timer.report_all() # Print all recorded times
-        print("Predict is complete.")
-
-        # Collect all categorized output strings into a single string
-        final_categorized_output = ', '.join(categorized_output_strings)
-
-        return download, sorted_general_strings, classified_tags, rating, character_res, general_res, unclassified_tags, tag_results, final_categorized_output
-        # END
+    ):
+        # Clear tag_results before starting a new prediction
+        tag_results.clear()
+
+        gallery_len = len(gallery)
+        print(f"Predict load model: {model_repo}, gallery length: {gallery_len}")
+
+        timer = Timer() # Create a timer
+        progressRatio = 0.5 if llama3_reorganize_model_repo else 1
+        progressTotal = gallery_len + 1
+        current_progress = 0
+
+        self.load_model(model_repo)
+        current_progress += progressRatio/progressTotal;
+        progress(current_progress, desc="Initialize wd model finished")
+        timer.checkpoint(f"Initialize wd model")
+
+        # Result
+        txt_infos = []
+        output_dir = tempfile.mkdtemp()
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+
+        sorted_general_strings = ""
+        # New code to create categorized output string
+        categorized_output_strings = []
+        rating = None
+        character_res = None
+        general_res = None
+
+        if llama3_reorganize_model_repo:
+            print(f"Llama3 reorganize load model {llama3_reorganize_model_repo}")
+            llama3_reorganize = Llama3Reorganize(llama3_reorganize_model_repo, loadModel=True)
+            current_progress += progressRatio/progressTotal;
+            progress(current_progress, desc="Initialize llama3 model finished")
+            timer.checkpoint(f"Initialize llama3 model")
+
+        timer.report()
+
+        prepend_list = [tag.strip() for tag in additional_tags_prepend.split(",") if tag.strip()]
+        append_list = [tag.strip() for tag in additional_tags_append.split(",") if tag.strip()]
+        if prepend_list and append_list:
+            append_list = [item for item in append_list if item not in prepend_list]
+
+        # Dictionary to track counters for each filename
+        name_counters = defaultdict(int)
+
+        for idx, value in enumerate(gallery):
+            try:
+                image_path = value[0]
+                image_name = os.path.splitext(os.path.basename(image_path))[0]
+
+                # Increment the counter for the current name
+                name_counters[image_name] += 1
+
+                if name_counters[image_name] > 1:
+                    image_name = f"{image_name}_{name_counters[image_name]:02d}"
+
+                image = self.prepare_image(image_path)
+
+                input_name = self.model.get_inputs()[0].name
+                label_name = self.model.get_outputs()[0].name
+                print(f"Gallery {idx:02d}: Starting run wd model...")
+                preds = self.model.run([label_name], {input_name: image})[0]
+
+                labels = list(zip(self.tag_names, preds[0].astype(float)))
+
+                # First 4 labels are actually ratings: pick one with argmax
+                ratings_names = [labels[i] for i in self.rating_indexes]
+                rating = dict(ratings_names)
+
+                # Then we have general tags: pick any where prediction confidence > threshold
+                general_names = [labels[i] for i in self.general_indexes]
+
+                if general_mcut_enabled:
+                    general_probs = np.array([x[1] for x in general_names])
+                    general_thresh = mcut_threshold(general_probs)
+
+                general_res = [x for x in general_names if x[1] > general_thresh]
+                general_res = dict(general_res)
+
+                # Everything else is characters: pick any where prediction confidence > threshold
+                character_names = [labels[i] for i in self.character_indexes]
+
+                if character_mcut_enabled:
+                    character_probs = np.array([x[1] for x in character_names])
+                    character_thresh = mcut_threshold(character_probs)
+                    character_thresh = max(0.15, character_thresh)
+
+                character_res = [x for x in character_names if x[1] > character_thresh]
+                character_res = dict(character_res)
+                character_list = list(character_res.keys())
+
+                sorted_general_list = sorted(
+                    general_res.items(),
+                    key=lambda x: x[1],
+                    reverse=True,
+                )
+                sorted_general_list = [x[0] for x in sorted_general_list]
+                #Remove values from character_list that already exist in sorted_general_list
+                character_list = [item for item in character_list if item not in sorted_general_list]
+                #Remove values from sorted_general_list that already exist in prepend_list or append_list
+                if prepend_list:
+                    sorted_general_list = [item for item in sorted_general_list if item not in prepend_list]
+                if append_list:
+                    sorted_general_list = [item for item in sorted_general_list if item not in append_list]
+
+                sorted_general_list = prepend_list + sorted_general_list + append_list
+
+                sorted_general_strings = ", ".join((character_list if characters_merge_enabled else []) + sorted_general_list).replace("(", "\(").replace(")", "\)")
+
+                classified_tags, unclassified_tags = classify_tags(sorted_general_list)
+
+                # Create a single string of ALL categorized tags for the current image
+                categorized_output_string = ', '.join([', '.join(tags) for tags in classified_tags.values()])
+                categorized_output_strings.append(categorized_output_string)
+                # Collect all categorized output strings into a single string
+                final_categorized_output = ', '.join(categorized_output_strings)
+
+                current_progress += progressRatio/progressTotal;
+                progress(current_progress, desc=f"image{idx:02d}, predict finished")
+                timer.checkpoint(f"image{idx:02d}, predict finished")
+
+                if llama3_reorganize_model_repo:
+                    print(f"Starting reorganize with llama3...")
+                    reorganize_strings = llama3_reorganize.reorganize(sorted_general_strings)
+                    reorganize_strings = re.sub(r" *Title: *", "", reorganize_strings)
+                    reorganize_strings = re.sub(r"\n+", ",", reorganize_strings)
+                    reorganize_strings = re.sub(r",,+", ",", reorganize_strings)
+                    sorted_general_strings += "," + reorganize_strings
+
+                    current_progress += progressRatio/progressTotal;
+                    progress(current_progress, desc=f"image{idx:02d}, llama3 reorganize finished")
+                    timer.checkpoint(f"image{idx:02d}, llama3 reorganize finished")
+
+                txt_file = self.create_file(sorted_general_strings, output_dir, image_name + ".txt")
+                txt_infos.append({"path":txt_file, "name": image_name + ".txt"})
+
+                # Store the result in tag_results using image_path as the key
+                tag_results[image_path] = {
+                    "strings": sorted_general_strings,
+                    "strings2": categorized_output_string, # Store the categorized output string here
+                    "classified_tags": classified_tags,
+                    "rating": rating,
+                    "character_res": character_res,
+                    "general_res": general_res,
+                    "unclassified_tags": unclassified_tags
+                }
+
+                timer.report()
+            except Exception as e:
+                print(traceback.format_exc())
+                print("Error predict: " + str(e))
+        # Result
+        download = []
+        if txt_infos is not None and len(txt_infos) > 0:
+            downloadZipPath = os.path.join(output_dir, "images-tagger-" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".zip")
+            with zipfile.ZipFile(downloadZipPath, 'w', zipfile.ZIP_DEFLATED) as taggers_zip:
+                for info in txt_infos:
+                    # Get file name from lookup
+                    taggers_zip.write(info["path"], arcname=info["name"])
+            download.append(downloadZipPath)
+
+        if llama3_reorganize_model_repo:
+            llama3_reorganize.release_vram()
+            del llama3_reorganize
+
+        progress(1, desc=f"Predict completed")
+        timer.report_all() # Print all recorded times
+        print("Predict is complete.")
+
+        return download, sorted_general_strings, final_categorized_output, classified_tags, rating, character_res, general_res, unclassified_tags, tag_results
 
 def get_selection_from_gallery(gallery: list, tag_results: dict, selected_state: gr.SelectData):
     if not selected_state:
         return selected_state
 
-    tag_result = {
+    tag_result = {
+        "strings": "",
+        "strings2": "",
+        "classified_tags": "{}",
+        "rating": "",
+        "character_res": "",
+        "general_res": "",
+        "unclassified_tags": "{}"
+    }
     if selected_state.value["image"]["path"] in tag_results:
         tag_result = tag_results[selected_state.value["image"]["path"]]
 
-    return (selected_state.value["image"]["path"], selected_state.value["caption"]), tag_result["strings"], tag_result["classified_tags"], tag_result["rating"], tag_result["character_res"], tag_result["general_res"], tag_result["unclassified_tags"]
+    return (selected_state.value["image"]["path"], selected_state.value["caption"]), tag_result["strings"], tag_result["strings2"], tag_result["classified_tags"], tag_result["rating"], tag_result["character_res"], tag_result["general_res"], tag_result["unclassified_tags"]
 
 def append_gallery(gallery: list, image: str):
     if gallery is None:
@@ -645,7 +665,7 @@ def remove_image_from_gallery(gallery: list, selected_image: str):
     if not gallery or not selected_image:
         return gallery
 
-    selected_image = ast.literal_eval(selected_image) #Use ast.literal_eval to parse text into a tuple.
+    selected_image = ast.literal_eval(selected_image) # Use ast.literal_eval to parse text into a tuple.
     # Remove the selected image from the gallery
     if selected_image in gallery:
         gallery.remove(selected_image)
@@ -860,7 +880,7 @@ def process_image(image, task_prompt, text_input=None):
         return results, output_image
     else:
         return "", None # Return empty string and None for unknown task prompts
-
+
 # Custom CSS to set the height of the gr.Dropdown menu
 css = """
 div.progress-level div.progress-level-inner {
@@ -927,7 +947,6 @@ scheduler.start()
 next_run_time_utc = restart_space_job.next_run_time.astimezone(timezone.utc)
 NEXT_RESTART = f"Next Restart: {next_run_time_utc.strftime('%Y-%m-%d %H:%M:%S')} (UTC) - The space will restart every 2 days to ensure stability and performance. It uses a background scheduler to handle the restart process."
 
-# Using "JohnSmith9982/small_and_pretty" theme
 with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True) as demo:
     gr.Markdown(value=f"<h1 style='text-align: center; margin-bottom: 1rem'>{TITLE}</h1>")
     gr.Markdown(value=DESCRIPTION)
@@ -1014,7 +1033,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
                 download_file = gr.File(label="Output (Download)") # 0
                 character_res = gr.Label(label="Output (characters)") # 1
                 sorted_general_strings = gr.Textbox(label="Output (string)", show_label=True, show_copy_button=True) # 2
+                final_categorized_output = gr.Textbox(label="Categorized Output (string)", show_label=True, show_copy_button=True) # 3
                 categorized = gr.JSON(label="Categorized (tags)") # 4
                 rating = gr.Label(label="Rating") # 5
                 general_res = gr.Label(label="Output (tags)") # 6
@@ -1023,6 +1042,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
                 [
                     download_file,
                     sorted_general_strings,
+                    final_categorized_output,
                     categorized,
                     rating,
                     character_res,
@@ -1037,7 +1057,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
         upload_button.upload(extend_gallery, inputs=[gallery, upload_button], outputs=gallery)
         # Event to update the selected image when an image is clicked in the gallery
         selected_image = gr.Textbox(label="Selected Image", visible=False)
-        gallery.select(get_selection_from_gallery, inputs=[gallery, tag_results], outputs=[selected_image, sorted_general_strings, categorized, rating, character_res, general_res, unclassified])
+        gallery.select(get_selection_from_gallery, inputs=[gallery, tag_results], outputs=[selected_image, sorted_general_strings, final_categorized_output, categorized, rating, character_res, general_res, unclassified])
         # Event to remove a selected image from the gallery
        remove_button.click(remove_image_from_gallery, inputs=[gallery, selected_image], outputs=gallery)
         submit.click(
@@ -1055,8 +1075,8 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
                 additional_tags_append,
                 tag_results,
             ],
-            outputs=[download_file, sorted_general_strings, categorized, rating, character_res, general_res, unclassified, tag_results,
-        )
+            outputs=[download_file, sorted_general_strings, final_categorized_output, categorized, rating, character_res, general_res, unclassified, tag_results,],
+        )
         gr.Examples(
             [["images/1girl.png", VIT_LARGE_MODEL_DSV3_REPO, 0.35, False, 0.85, False]],
             inputs=[
@@ -1078,8 +1098,7 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
                 text_input = gr.Textbox(label="Text Input (optional)")
                 submit_btn = gr.Button(value="Submit")
             with gr.Column(variant="panel"):
-                output_text = gr.Textbox(label="Output Text", show_label=True, show_copy_button=True, lines=8) # Here is the problem!
+                output_text = gr.Textbox(label="Output Text", show_label=True, show_copy_button=True, lines=8)
                 output_img = gr.Image(label="Output Image")
         gr.Examples(
             examples=[
@@ -1094,4 +1113,4 @@ with gr.Blocks(title=TITLE, css=css, theme="Werli/Multi-Tagger", fill_width=True
         )
         submit_btn.click(process_image, [input_img, task_prompt, text_input], [output_text, output_img])
 
-demo.queue(max_size=2).launch()
+demo.queue(max_size=2).launch()
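One detail worth keeping in mind when reading the UI hunks above: Gradio matches a handler's return values to the components in `outputs=` purely by position, which is why `final_categorized_output` is inserted at the same index in the `gallery.select` and `submit.click` output lists as in `predict`'s return tuple. A minimal, self-contained sketch of that behaviour (generic component and function names, not the app's):

```python
# Minimal illustration of positional output wiring in Gradio.
# Component and function names here are generic placeholders, not the app's.
import gradio as gr

def predict_stub():
    # The return order must match the order of the components in outputs=[...].
    return "tag string", "categorized string"

with gr.Blocks() as demo:
    strings = gr.Textbox(label="Output (string)")
    final_categorized = gr.Textbox(label="Categorized Output (string)")
    run = gr.Button("Run")
    # "categorized string" lands in final_categorized because it is second
    # in both the return tuple and this outputs list.
    run.click(predict_stub, inputs=[], outputs=[strings, final_categorized])

if __name__ == "__main__":
    demo.launch()
```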