Sai Anoushka committed on
Commit
0f6ecfb
Β·
1 Parent(s): 51a5758

Add application file

Browse files
Files changed (4) hide show
  1. Dockerfile +11 -0
  2. README copy.md +11 -0
  3. app.py +56 -0
  4. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
# Official Python 3.11 image (Debian-based, pip preinstalled).
FROM python:3.11

WORKDIR /code

# Copy only the dependency list first so the pip-install layer is cached
# and not rebuilt every time application code changes.
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application source.
COPY . .

# Port 7860 is the default port expected by Hugging Face Spaces.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README copy.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: VisionMate API
3
+ emoji: 🧠
4
+ colorFrom: indigo
5
+ colorTo: pink
6
+ sdk: docker
7
+ app_file: app.py
8
+ pinned: false
9
+ ---
10
+ # VisionMate Backend
11
+ Runs a FastAPI image-captioning service using the GIT model (`microsoft/git-base-coco`).
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
from transformers import GitProcessor, AutoModelForCausalLM
import torch
import io


app = FastAPI()

# Enable CORS so a browser front-end on any origin can call this API.
# NOTE(review): "*" origins together with allow_credentials=True is very
# permissive; narrow the origin list if the API ever handles auth cookies.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load GIT-base model and processor once at startup.
print("🚀 Loading microsoft/git-base-coco model...")
# Fix: the processor id previously had a trailing slash
# ("microsoft/git-base-coco/"), which is not a valid Hub repo id and was
# inconsistent with the model id used below.
processor = GitProcessor.from_pretrained("microsoft/git-base-coco")
# Half precision only makes sense on GPU; keep fp32 on CPU.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/git-base-coco",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
# Fix: the model was loaded in fp16 when CUDA was available but never moved
# to the GPU, leaving half-precision weights on the CPU.
if torch.cuda.is_available():
    model = model.to("cuda")
model.eval()  # inference-only service; disable dropout etc.
print("✅ Model loaded!")
28
+
29
@app.get("/")
def read_root():
    """Health-check endpoint confirming the service is up."""
    payload = {"message": "VisionMate API is running!"}
    return payload
32
+
33
@app.post("/caption/")
async def generate_caption(file: UploadFile = File(...)):
    """Generate a natural-language caption for an uploaded image.

    Accepts any image format Pillow can decode and returns
    ``{"caption": <str>}``; the caption starts with the guiding prompt.
    """
    print("📥 Received image upload request")

    # Read the raw upload and normalise to RGB — the vision backbone
    # expects 3-channel input.
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    print("🖼️ Image processed")

    # Provide a better prompt to guide caption generation
    prompt = "a photo of"
    inputs = processor(images=image, text=prompt, return_tensors="pt")
    # Fix: move tensors to the model's device and match its dtype —
    # previously inputs stayed on CPU/fp32 even when the model was fp16,
    # which would raise a dtype mismatch inside generate().
    inputs = {
        k: v.to(device=model.device, dtype=model.dtype)
        if v.is_floating_point()
        else v.to(model.device)
        for k, v in inputs.items()
    }

    print("🤖 Generating caption...")
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        output_ids = model.generate(**inputs, max_new_tokens=50)  # increased length
    caption = processor.batch_decode(output_ids, skip_special_tokens=True)[0]

    print("📝 Caption generated:", caption)
    return {"caption": caption}
51
+
52
+
53
# Allow running the API directly with `python app.py` for local development;
# the Docker image starts uvicorn itself via CMD, so this path is dev-only.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
+
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ fastapi
4
+ uvicorn
5
+ pillow
6
+ python-multipart