Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ import torch
 from torchvision import transforms
 from torchvision.models.detection import fasterrcnn_resnet50_fpn
 from PIL import Image
-from transformers import pipeline
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 from fastapi.responses import RedirectResponse
 import numpy as np
@@ -16,25 +16,19 @@ import easyocr
 # Initialize FastAPI
 app = FastAPI()

-# Load AI Model for Question Answering (
-
-
-# Preload Hugging Face model
-model_name = "microsoft/phi-2"
+# Load AI Model for Question Answering (Mistral-7B)
+model_name = "mistralai/Mistral-7B"
 print(f"🚀 Loading model: {model_name}...")
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

-qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device
+qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)

 # Load Pretrained Object Detection Model (Torchvision)
 from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
 weights = FasterRCNN_ResNet50_FPN_Weights.DEFAULT
-
-
-# Load Pretrained Object Detection Model (if needed)
-model = fasterrcnn_resnet50_fpn(pretrained=True)
-model.eval()
+object_detection_model = fasterrcnn_resnet50_fpn(weights=weights)
+object_detection_model.eval()

 # Initialize OCR Model (Lazy Load)
 reader = easyocr.Reader(["en"], gpu=True)
@@ -172,4 +166,4 @@ app = gr.mount_gradio_app(app, demo, path="/")

 @app.get("/")
 def home():
-    return RedirectResponse(url="/")
+    return RedirectResponse(url="/")
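For context, a minimal sketch of the two pieces this commit rewires: the fp16 causal-LM text-generation pipeline and the weights-enum Faster R-CNN loader. It assumes, as the diff does, that the "mistralai/Mistral-7B" repo id resolves on the Hub and that a CUDA GPU is available for the fp16 weights; neither is guaranteed by the diff itself.

# Sketch only: standalone version of what the updated app.py snippet sets up.
# Assumptions (from the diff, not verified here): the "mistralai/Mistral-7B"
# repo id exists on the Hub and a GPU is available for fp16 inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from torchvision.models.detection import (
    fasterrcnn_resnet50_fpn,
    FasterRCNN_ResNet50_FPN_Weights,
)

model_name = "mistralai/Mistral-7B"  # taken verbatim from the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto"
)
# device_map="auto" lets accelerate place the weights, so this sketch does not
# also pass an explicit device to the pipeline.
qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
out = qa_pipeline("What objects can Faster R-CNN detect?", max_new_tokens=64, do_sample=False)
print(out[0]["generated_text"])

# Detection now uses the torchvision weights enum instead of pretrained=True.
weights = FasterRCNN_ResNet50_FPN_Weights.DEFAULT
detector = fasterrcnn_resnet50_fpn(weights=weights).eval()
with torch.no_grad():
    predictions = detector([torch.rand(3, 480, 640)])  # one dummy RGB image in [0, 1]
print(predictions[0]["boxes"].shape, predictions[0]["labels"].shape)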