nyasukun committed
Commit 970a4b6 · verified · 1 Parent(s): a929439

Update app.py

Files changed (1):
  1. app.py  +32 -25
app.py CHANGED
@@ -42,38 +42,39 @@ generation_pipeline = pipeline(
 )
 logger.info(f"Generation model loaded successfully: {generation_model_name}")
 
-# Function that runs classification asynchronously
-async def classify_text_async(prompt):
+# Synchronous function that uses the GPU (classification)
+@spaces.GPU(duration=60)
+def classify_text(prompt):
     logger.info(f"Running classification for: {prompt[:50]}...")
-    # Run the CPU-bound work in an executor so it does not block the event loop
-    loop = asyncio.get_event_loop()
-    classification_result = await loop.run_in_executor(
-        None,
-        lambda: classification_pipeline(prompt)
-    )
+    classification_result = classification_pipeline(prompt)
     logger.info(f"Classification complete: {classification_result}")
     return classification_result
 
-# Function that runs generation asynchronously
-async def generate_text_async(prompt):
+# Synchronous function that uses the GPU (generation)
+@spaces.GPU(duration=60)
+def generate_text(prompt):
     logger.info(f"Running text generation for: {prompt[:50]}...")
-    loop = asyncio.get_event_loop()
-    generation_result = await loop.run_in_executor(
-        None,
-        lambda: generation_pipeline(
-            prompt,
-            max_new_tokens=50,
-            do_sample=True,
-            temperature=0.7,
-            num_return_sequences=1
-        )
+    generation_result = generation_pipeline(
+        prompt,
+        max_new_tokens=50,
+        do_sample=True,
+        temperature=0.7,
+        num_return_sequences=1
     )
     generated_text = generation_result[0]["generated_text"]
     logger.info(f"Text generation complete, generated: {len(generated_text)} chars")
     return generated_text
 
-# Asynchronous inference function that uses the GPU
-@spaces.GPU(duration=120)
+# Async wrapper functions
+async def classify_text_async(prompt):
+    loop = asyncio.get_event_loop()
+    return await loop.run_in_executor(None, lambda: classify_text(prompt))
+
+async def generate_text_async(prompt):
+    loop = asyncio.get_event_loop()
+    return await loop.run_in_executor(None, lambda: generate_text(prompt))
+
+# Async function that performs the main processing
 async def process_text_async(prompt):
     logger.info(f"Processing input asynchronously: {prompt[:50]}...")
 
@@ -91,12 +92,18 @@ async def process_text_async(prompt):
     combined_result = f"Classification result: {classification_result}\n\nGenerated text: {generated_text}"
     return combined_result
 
-# Gradio also supports async functions, so pass it in directly
+# Synchronous wrapper function for Gradio
+def process_text(prompt):
+    # Run the async function synchronously
+    loop = asyncio.get_event_loop()
+    return loop.run_until_complete(process_text_async(prompt))
+
+# Gradio interface
 demo = gr.Interface(
-    fn=process_text_async,  # use the async function
+    fn=process_text,  # use the synchronous wrapper function
     inputs=gr.Textbox(lines=3, label="Input text"),
     outputs=gr.Textbox(label="Processing result", lines=8),
-    title="Text Classification & Generation Demo (async version)",
+    title="Text Classification & Generation Demo (async processing version)",
     description="Runs classification and generation on the input text asynchronously and in parallel."
 )
 
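
The pattern this commit adopts — synchronous @spaces.GPU functions reached through thin async wrappers — can be exercised on its own. Below is a minimal, self-contained sketch under stated assumptions, not the Space's actual code: classify_text and generate_text are hypothetical stand-ins that just sleep instead of calling the transformers pipelines, and the asyncio.gather call is an assumption about how process_text_async combines the two results, since its body is not shown in this diff.

import asyncio
import time

# Hypothetical stand-ins for the @spaces.GPU-decorated pipeline calls in app.py;
# time.sleep() simulates the blocking GPU work.
def classify_text(prompt: str) -> str:
    time.sleep(0.5)
    return f"label for: {prompt[:20]}"

def generate_text(prompt: str) -> str:
    time.sleep(0.5)
    return f"continuation of: {prompt[:20]}"

# Thin async wrappers: hand the blocking call to the default thread pool
# so the event loop stays responsive while it runs.
async def classify_text_async(prompt: str) -> str:
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, lambda: classify_text(prompt))

async def generate_text_async(prompt: str) -> str:
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, lambda: generate_text(prompt))

async def process_text_async(prompt: str) -> str:
    # Assumption: the two tasks are awaited together with asyncio.gather;
    # the real body of process_text_async is not part of this diff.
    classification, generated = await asyncio.gather(
        classify_text_async(prompt),
        generate_text_async(prompt),
    )
    return f"Classification result: {classification}\n\nGenerated text: {generated}"

if __name__ == "__main__":
    print(asyncio.run(process_text_async("Example input text")))

Because the two run_in_executor calls are awaited concurrently, the total latency is roughly the slower of the two blocking calls rather than their sum.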