"""FastAPI service exposing OCR (/api/ocr) and machine translation (/api/translate).

Both endpoints are guarded by an `api_key` header checked against the
API_KEY environment variable.
"""
import io
import os
from functools import lru_cache
from typing import Optional

import cv2
import numpy as np
import pytesseract
from fastapi import Body, Depends, FastAPI, File, HTTPException, UploadFile
from fastapi.encoders import jsonable_encoder
from fastapi.security.api_key import APIKeyHeader
from nltk.tokenize import sent_tokenize
from transformers import MarianMTModel, MarianTokenizer

# Expected client key; supplied via the environment, never hard-coded.
API_KEY = os.environ.get("API_KEY")

app = FastAPI()

# auto_error=False so a missing header still reaches get_api_key, which
# raises our own uniform 401 instead of FastAPI's default 403.
api_key_header = APIKeyHeader(name="api_key", auto_error=False)


def get_api_key(api_key: Optional[str] = Depends(api_key_header)) -> str:
    """Dependency: validate the `api_key` request header.

    Raises:
        HTTPException: 401 when the header is absent or does not match API_KEY.
    """
    if api_key is None or api_key != API_KEY:
        raise HTTPException(status_code=401, detail="Unauthorized access")
    return api_key


@app.post("/api/ocr", response_model=dict)
async def ocr(
    api_key: str = Depends(get_api_key),
    image: UploadFile = File(...),
):
    """Run Tesseract OCR on an uploaded image.

    Returns:
        {"ImageText": <extracted text>} on success.

    Raises:
        HTTPException: 400 when the upload is not a decodable image,
        500 on any other OCR failure.
    """
    try:
        # BUG FIX: the original passed the UploadFile object itself to
        # io.BytesIO(); the raw bytes must be read (awaited) first.
        content = await image.read()
        file_bytes = np.frombuffer(content, dtype=np.uint8)
        frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        if frame is None:
            raise HTTPException(status_code=400, detail="Could not decode image")
        # BUG FIX: the original never invoked tesseract and always returned
        # the literal string "text".
        text = pytesseract.image_to_string(frame, lang="eng")
    except HTTPException:
        raise
    except Exception as e:
        # BUG FIX: `return {...}, 500` does not set the HTTP status in
        # FastAPI (the tuple is serialized as a 200 body); raise instead.
        raise HTTPException(status_code=500, detail=str(e))
    return jsonable_encoder({"ImageText": text})


@app.post("/api/translate", response_model=dict)
async def translate(
    api_key: str = Depends(get_api_key),
    text: str = Body(...),
    src: str = "en",
    trg: str = "zh",
):
    """Translate `text` sentence-by-sentence from `src` to `trg`.

    Each translated sentence is suffixed with a newline, preserving the
    original output format.
    """
    # NOTE: the original re-checked api_key here and returned a (dict, 401)
    # tuple that FastAPI would serve as a 200; Depends(get_api_key) above
    # already enforces the key, so the redundant check is removed.
    tokenizer, model = get_model(src, trg)
    pieces = []
    for sentence in sent_tokenize(text):
        token_ids = model.generate(**tokenizer(sentence, return_tensors="pt"))[0]
        pieces.append(tokenizer.decode(token_ids, skip_special_tokens=True) + "\n")
    return jsonable_encoder({"translated_text": "".join(pieces)})


@lru_cache(maxsize=8)
def get_model(src: str, trg: str):
    """Load (and cache) the Helsinki-NLP MarianMT tokenizer/model pair.

    PERF FIX: the original reloaded the multi-hundred-MB model from disk on
    every request; lru_cache keeps up to 8 language-pair models resident.
    """
    model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}"
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return tokenizer, model