""" | |
A model worker executes the model. | |
""" | |
import os, sys | |
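
# NOTE: the settings below are assumed to configure Ola's vision/video
# preprocessing (resize bounds, regional pooling, ViT loading behaviour).
# They are read from the environment by the `ola` modules, so they must be
# set before those modules are imported.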
os.environ['LOWRES_RESIZE'] = '384x32'
os.environ['HIGHRES_BASE'] = '0x32'
os.environ['VIDEO_RESIZE'] = "0x64"
os.environ['VIDEO_MAXRES'] = "480"
os.environ['VIDEO_MINRES'] = "288"
os.environ['MAXRES'] = '1536'
os.environ['MINRES'] = '0'
os.environ['REGIONAL_POOL'] = '2x'
os.environ['FORCE_NO_DOWNSAMPLE'] = '1'
os.environ['LOAD_VISION_EARLY'] = '1'
os.environ['SKIP_LOAD_VIT'] = '1'

sys.path.append('/mnt/lzy/Ola')

import argparse
import asyncio
import json
import time
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
import torch
import uvicorn
from functools import partial

from ola.constants import WORKER_HEART_BEAT_INTERVAL
from ola.utils import (build_logger, server_error_msg,
                       pretty_print_semaphore)
from ola.model.builder import load_pretrained_model
from ola.mm_utils import process_anyres_highres_image_genli, load_image_from_base64, tokenizer_image_token, KeywordsStoppingCriteria
from ola.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from transformers import TextIteratorStreamer
from threading import Thread

GB = 1 << 30

worker_id = str(uuid.uuid4())[:6]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
global_counter = 0
model_semaphore = None
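

# Runs on a background thread: periodically asks the worker to send a heart
# beat so the controller keeps treating it as alive.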
def heart_beat_worker(controller):
    while True:
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        controller.send_heart_beat()
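

# Holds the loaded Ola model and tokenizer, registers the worker with the
# controller, and implements streaming generation for incoming requests.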
class ModelWorker:
    def __init__(self, controller_addr, worker_addr,
                 worker_id, no_register,
                 model_path, model_base, model_name,
                 load_8bit, load_4bit):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        if model_path.endswith("/"):
            model_path = model_path[:-1]
        if model_name is None:
            model_paths = model_path.split("/")
            if model_paths[-1].startswith('checkpoint-'):
                self.model_name = model_paths[-2] + "_" + model_paths[-1]
            else:
                self.model_name = model_paths[-1]
        else:
            self.model_name = model_name

        logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
        self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(
            model_path, None, self.model_name, load_8bit, load_4bit, device_map='cuda:0')
        self.model = self.model.eval()
        self.model = self.model.bfloat16()
        self.is_multimodal = 'ola' in self.model_name.lower()

        if not no_register:
            self.register_to_controller()
            self.heart_beat_thread = threading.Thread(
                target=heart_beat_worker, args=(self,))
            self.heart_beat_thread.start()

    def register_to_controller(self):
        logger.info("Register to controller")

        url = self.controller_addr + "/register_worker"
        data = {
            "worker_name": self.worker_addr,
            "check_heart_beat": True,
            "worker_status": self.get_status()
        }
        r = requests.post(url, json=data)
        assert r.status_code == 200, f"Failed to register to controller: {r.text}"

    def send_heart_beat(self):
        logger.info(f"Send heart beat. Models: {[self.model_name]}. "
                    f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
                    f"global_counter: {global_counter}")

        # Heart beats to the controller are intentionally disabled in this
        # deployment; the early return below skips the round trip and leaves
        # the original logic in place for reference.
        print('skip heart beat')
        return

        url = self.controller_addr + "/receive_heart_beat"

        while True:
            try:
                ret = requests.post(url, json={
                    "worker_name": self.worker_addr,
                    "queue_length": self.get_queue_length()}, timeout=5)
                exist = ret.json()["exist"]
                break
            except requests.exceptions.RequestException as e:
                logger.error(f"heart beat error: {e}")
            time.sleep(5)

        if not exist:
            self.register_to_controller()

    def get_queue_length(self):
        if model_semaphore is None:
            return 0
        else:
            return args.limit_model_concurrency - model_semaphore._value + (len(
                model_semaphore._waiters) if model_semaphore._waiters is not None else 0)

    def get_status(self):
        return {
            "model_names": [self.model_name],
            "speed": 1,
            "queue_length": self.get_queue_length(),
        }
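
    # Streams generation results as null-byte-delimited JSON chunks; each chunk
    # carries the full text generated so far (prompt included) plus an error code.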
    def generate_stream(self, params):
        tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor

        prompt = params["prompt"]
        ori_prompt = prompt
        images = params.get("images", None)
        num_image_tokens = 0
        if images is not None and len(images) > 0 and self.is_multimodal:
            if len(images) > 0:
                if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
                    raise ValueError("Number of images does not match number of <image> tokens in prompt")

                images = [load_image_from_base64(image) for image in images]
                image_sizes = [image.size for image in images]
                logger.info(f"image_sizes: {image_sizes}")
                image_tensor, image_highres_tensor = process_anyres_highres_image_genli(images, image_processor, model.config)

                if type(image_tensor) is list:
                    image_tensor = [image_.to(self.model.device, dtype=torch.bfloat16) for image_ in image_tensor]
                else:
                    image_tensor = image_tensor.to(self.model.device, dtype=torch.bfloat16)
                if type(image_highres_tensor) is list:
                    image_highres_tensor = [image_.to(self.model.device, dtype=torch.bfloat16) for image_ in image_highres_tensor]
                else:
                    image_highres_tensor = image_highres_tensor.to(self.model.device, dtype=torch.bfloat16)

                replace_token = DEFAULT_IMAGE_TOKEN
                if getattr(self.model.config, 'mm_use_im_start_end', False):
                    replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
                prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
                # num_image_tokens = prompt.count(replace_token) * model.get_vision_tower().num_patches
            else:
                images = None
                image_sizes = None
            # Pass the preprocessed tensors (not the raw PIL images) to generate().
            image_args = {"images": image_tensor, "images_highres": image_highres_tensor, "image_sizes": image_sizes}
        else:
            images = None
            image_args = {}

        temperature = float(params.get("temperature", 1.0))
        top_p = float(params.get("top_p", 1.0))
        max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
        max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
        stop_str = params.get("stop", None)
        stop_str = '<|im_end|>' if stop_str is None else stop_str
        do_sample = True if temperature > 0.001 else False

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)

        # max_new_tokens = 1024 # min(max_new_tokens, max_context_length - input_ids.shape[-1] - 576)

        if max_new_tokens < 1:
            yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
            return
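
        # Run generation on a worker thread; TextIteratorStreamer then yields
        # decoded text pieces back on this thread as they are produced.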
        thread = Thread(target=model.generate, kwargs=dict(
            inputs=input_ids,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            # stopping_criteria=[stopping_criteria],
            use_cache=True,
            modalities=['image'],
            **image_args
        ))
        thread.start()

        start_time = time.time()
        generated_text = ori_prompt
        for new_text in streamer:
            generated_text += new_text
            if generated_text.endswith(stop_str):
                generated_text = generated_text[:-len(stop_str)]
            yield json.dumps({"text": generated_text, "error_code": 0}).encode() + b"\0"
        end_time = time.time()

        new_generated = generated_text[len(ori_prompt):]
        new_generated_tokens = tokenizer(new_generated).input_ids
        token_per_second = len(new_generated_tokens) / (end_time - start_time)
        print(f"token_per_second: {token_per_second}")
    def generate_stream_gate(self, params):
        try:
            for x in self.generate_stream(params):
                yield x
        except ValueError as e:
            print("Caught ValueError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode() + b"\0"
        except torch.cuda.CudaError as e:
            print("Caught torch.cuda.CudaError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode() + b"\0"
        except Exception as e:
            print("Caught Unknown Error", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode() + b"\0"


app = FastAPI()


def release_model_semaphore(fn=None):
    model_semaphore.release()
    if fn is not None:
        fn()
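

# A shared asyncio.Semaphore limits concurrent generations to
# --limit-model-concurrency; it is released by a background task once the
# streamed response has finished.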
# The route name below follows the LLaVA-style worker API that the controller expects.
@app.post("/worker_generate_stream")
async def generate_stream(request: Request):
    global model_semaphore, global_counter
    global_counter += 1
    params = await request.json()

    if model_semaphore is None:
        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
    await model_semaphore.acquire()
    worker.send_heart_beat()
    generator = worker.generate_stream_gate(params)
    background_tasks = BackgroundTasks()
    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
    return StreamingResponse(generator, background=background_tasks)


@app.post("/worker_get_status")
async def get_status(request: Request):
    return worker.get_status()
if __name__ == "__main__": | |
parser = argparse.ArgumentParser() | |
parser.add_argument("--host", type=str, default="0.0.0.0") | |
parser.add_argument("--port", type=int, default=21002) | |
parser.add_argument("--worker-address", type=str, | |
default="http://0.0.0.0:21002") | |
parser.add_argument("--controller-address", type=str, | |
default="http://0.0.0.0:12345") | |
parser.add_argument("--model-path", type=str, default="/mnt/lzy/ola-model/ola-7b") | |
parser.add_argument("--model-base", type=str, default=None) | |
parser.add_argument("--model-name", type=str) | |
parser.add_argument("--multi-modal", action="store_true", help="Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.") | |
parser.add_argument("--limit-model-concurrency", type=int, default=5) | |
parser.add_argument("--stream-interval", type=int, default=1) | |
parser.add_argument("--no-register", action="store_true") | |
parser.add_argument("--load-8bit", action="store_true") | |
parser.add_argument("--load-4bit", action="store_true") | |
args = parser.parse_args() | |
logger.info(f"args: {args}") | |
if args.multi_modal: | |
logger.warning("Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.") | |
worker = ModelWorker(args.controller_address, | |
args.worker_address, | |
worker_id, | |
args.no_register, | |
args.model_path, | |
args.model_base, | |
args.model_name, | |
args.load_8bit, | |
args.load_4bit) | |
uvicorn.run(app, host=args.host, port=args.port, log_level="info") | |
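
# Example usage (a minimal sketch, not part of the worker itself; it assumes this
# file is saved as model_worker.py, that the worker is reachable at the default
# --worker-address, and that a plain text prompt without <image> tokens is
# acceptable for the loaded model):
#
#   $ python model_worker.py --model-path /mnt/lzy/ola-model/ola-7b --no-register
#
#   import json, requests
#   resp = requests.post("http://0.0.0.0:21002/worker_generate_stream",
#                        json={"prompt": "Hello, who are you?",
#                              "temperature": 0.2, "max_new_tokens": 64},
#                        stream=True)
#   for chunk in resp.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
#       if chunk:
#           print(json.loads(chunk.decode())["text"])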