sashdev committed on
Commit
9908a7a
·
verified ·
1 Parent(s): d5ae2f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -7,7 +7,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
7
  model_name = "hassaanik/grammar-correction-model"
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
 
10
- # Use GPU if available, otherwise fall back to CPU
11
  device = "cuda" if torch.cuda.is_available() else "cpu"
12
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
13
 
@@ -15,26 +15,26 @@ model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
15
  if torch.cuda.is_available():
16
  model.half()
17
 
18
- # Async grammar correction function
19
- async def correct_grammar_async(text):
20
- # Tokenize input and move it to the correct device (CPU/GPU)
21
- inputs = tokenizer.encode(text, return_tensors="pt", max_length=512, truncation=True).to(device)
22
 
23
- # Asynchronous operation to run grammar correction
24
- outputs = await asyncio.to_thread(model.generate, inputs, max_length=512, num_beams=5, early_stopping=True)
25
 
26
- # Decode output and return corrected text
27
- corrected_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
28
- return corrected_text
29
 
30
  # Gradio interface function to handle input and output
31
  def correct_grammar_interface(text):
32
- corrected_text = asyncio.run(correct_grammar_async(text))
33
  return corrected_text
34
 
35
- # Create Gradio Interface
36
  with gr.Blocks() as grammar_app:
37
- gr.Markdown("<h1>Async Grammar Correction App</h1>")
38
 
39
  with gr.Row():
40
  input_box = gr.Textbox(label="Input Text", placeholder="Enter text to be corrected", lines=4)
@@ -42,7 +42,7 @@ with gr.Blocks() as grammar_app:
42
 
43
  submit_button = gr.Button("Correct Grammar")
44
 
45
- # When the button is clicked, run the correction process
46
  submit_button.click(fn=correct_grammar_interface, inputs=input_box, outputs=output_box)
47
 
48
  # Launch the app
 
7
# Load the grammar-correction seq2seq model and its tokenizer once at startup.
model_name = "hassaanik/grammar-correction-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Prefer the GPU when one is present; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)

# On GPU, switch the weights to float16 for faster inference.
if torch.cuda.is_available():
    model.half()
17
 
18
# Async grammar correction function with batch processing
async def correct_grammar_async(texts):
    """Correct grammar for a batch of input strings.

    Args:
        texts: list of str — sentences/paragraphs to correct.

    Returns:
        list of str — one corrected string per input, in order.
    """
    # Tokenize the batch; padding=True aligns variable-length inputs,
    # truncation caps each input at 512 tokens.
    inputs = tokenizer(
        texts, return_tensors="pt", padding=True, truncation=True, max_length=512
    ).to(device)

    # Run generation in a worker thread so the event loop is not blocked.
    # BUG FIX: forward attention_mask along with input_ids — with padding=True
    # generate() must be told which positions are padding, otherwise batched
    # outputs are computed over pad tokens and quality degrades.
    outputs = await asyncio.to_thread(
        model.generate,
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=512,
        num_beams=5,
        early_stopping=True,
    )

    # Decode each generated sequence, dropping special tokens.
    return [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
29
 
30
# Gradio interface function to handle input and output
def correct_grammar_interface(text):
    """Gradio click handler: correct a single piece of text.

    Wraps the async batch API around a one-element batch and
    unwraps the single corrected result.
    """
    return asyncio.run(correct_grammar_async([text]))[0]
34
 
35
+ # Gradio Interface with async capabilities and batch input/output
36
  with gr.Blocks() as grammar_app:
37
+ gr.Markdown("<h1>Fast Async Grammar Correction</h1>")
38
 
39
  with gr.Row():
40
  input_box = gr.Textbox(label="Input Text", placeholder="Enter text to be corrected", lines=4)
 
42
 
43
  submit_button = gr.Button("Correct Grammar")
44
 
45
+ # When the button is clicked, run the correction process asynchronously
46
  submit_button.click(fn=correct_grammar_interface, inputs=input_box, outputs=output_box)
47
 
48
  # Launch the app