ikraamkb commited on
Commit
d36238a
·
verified ·
1 Parent(s): c4ce36f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -18,10 +18,13 @@ import io
18
  app = FastAPI()
19
 
20
  # ✅ Load AI Models
21
- vqa_pipeline = pipeline("image-to-text", model="Salesforce/blip-vqa-base")
22
- code_generator = pipeline("text-generation", model="openai-community/gpt2-medium")
23
- table_analyzer = pipeline("table-question-answering", model="google/tapas-large-finetuned-wtq")
24
- qa_pipeline = pipeline("text2text-generation",model="google/flan-t5-large",tokenizer="google/flan-t5-large",use_fast=True)
 
 
 
25
 
26
  # ✅ Function to truncate text to 450 tokens
27
  def truncate_text(text, max_tokens=450):
@@ -167,7 +170,7 @@ viz_interface = gr.Interface(
167
  )
168
 
169
  # ✅ Mount Gradio Interfaces
170
- demo = gr.TabbedInterface([doc_interface, img_interface, viz_interface], ["Document QA", "Image QA", "Data Visualization"])
171
  app = gr.mount_gradio_app(app, demo, path="/")
172
 
173
  @app.get("/")
 
18
  app = FastAPI()
19
 
20
  # ✅ Load AI Models
21
+ from transformers import pipeline
22
+
23
+ qa_pipeline = pipeline("text2text-generation",model="google/flan-t5-large",tokenizer="google/flan-t5-large",use_fast=True,device=0)
24
+ table_analyzer = pipeline("table-question-answering",model="google/tapas-large-finetuned-wtq",tokenizer="google/tapas-large-finetuned-wtq",use_fast=True,device=0)
25
+ code_generator = pipeline("text-generation",model="openai-community/gpt2-medium",tokenizer="openai-community/gpt2-medium",use_fast=True,device=0)
26
+ vqa_pipeline = pipeline("image-to-text",model="Salesforce/blip-vqa-base",device=0 )
27
+
28
 
29
  # ✅ Function to truncate text to 450 tokens
30
  def truncate_text(text, max_tokens=450):
 
170
  )
171
 
172
  # ✅ Mount Gradio Interfaces
173
+ demo = gr.TabbedInterface([doc_interface, img_interface, viz_interface], ["Document QA", "Image QA"])
174
  app = gr.mount_gradio_app(app, demo, path="/")
175
 
176
  @app.get("/")