seawolf2357 committed
Commit 234a076 · verified · 1 Parent(s): d87e79b

Update web.py

Files changed (1)
  1. web.py +61 -28
web.py CHANGED
@@ -1,33 +1,66 @@
  import gradio as gr
- import datetime
- import asyncio
-
- def update_live_message():
-     """Return the current time together with a 'live' message."""
-     current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-     return f"{current_time} - live"
-
- async def periodic_update(interface, interval=60):
-     """Push an update to the given interface at one-minute intervals."""
-     while True:
-         live_message = update_live_message()
-         interface.update(live_message)
-         await asyncio.sleep(interval)
-
- def run_gradio():
-     """Set up and launch the Gradio web interface."""
-     live_block = gr.Textbox(label="Live Output", value="Starting...", elem_id="live_output")
-
-     demo = gr.Blocks()
-
-     with demo:
-         gr.Markdown("## Live Server Output")
-         live_block
-
-     demo.launch(server_name="0.0.0.0", server_port=7860, inbrowser=True)
-
-     # Start the asynchronous update task
-     asyncio.run(periodic_update(live_block))
-
- if __name__ == "__main__":
-     run_gradio()
+ from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
+ import torch
+ import re
+ import os
+
+ # Load the model and set up preprocessing
+ model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
+ processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")
+
+ def modify_caption(caption: str) -> str:
+     prefix_substrings = [
+         ('captured from ', ''),
+         ('captured at ', '')
+     ]
+     pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
+     replacers = {opening: replacer for opening, replacer in prefix_substrings}
+
+     def replace_fn(match):
+         return replacers[match.group(0)]
+
+     return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
+
+ def create_captions_rich(image):
+     prompt = "caption en"
+     image_tensor = processor(images=image, return_tensors="pt").pixel_values.to("cpu")
+     image_tensor = (image_tensor * 255).type(torch.uint8)
+     model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cpu")
+     input_len = model_inputs["input_ids"].shape[-1]
+
+     with torch.no_grad():
+         generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
+         generation = generation[0][input_len:]
+     decoded = processor.decode(generation, skip_special_tokens=True)
+     modified_caption = modify_caption(decoded)
+     return modified_caption
+
+ css = """
+ #mkd {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.HTML("<h1><center>PaliGemma Fine-tuned for Long Captioning<center><h1>")
+     with gr.Tab(label="PaliGemma Long Captioner"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 submit_btn = gr.Button(value="Submit")
+                 output = gr.Text(label="Caption")
+
+         gr.Examples(
+             [["image1.jpg"], ["image2.jpg"], ["image3.png"], ["image4.jpg"], ["image5.jpg"], ["image6.PNG"]],
+             inputs=[input_img],
+             outputs=[output],
+             fn=create_captions_rich,
+             label='Try captioning on examples'
+         )
+
+         submit_btn.click(create_captions_rich, [input_img], [output])
+
+ # Change the port
+ demo.launch(server_name="0.0.0.0", server_port=int(os.getenv
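
For reference, a minimal usage sketch (not part of the commit). It assumes the model, processor, and the two functions added above are available in the current session, for example by wrapping the final demo.launch(...) call in an if __name__ == "__main__": guard before importing web.py, and that example.jpg is a hypothetical local test image:

    from PIL import Image

    img = Image.open("example.jpg")    # hypothetical test image
    print(create_captions_rich(img))   # caption from the fine-tuned PaliGemma model

    # modify_caption() strips one leading "captured from " / "captured at " prefix:
    print(modify_caption("captured from a rooftop, the city skyline at dusk"))
    # -> "a rooftop, the city skyline at dusk"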