import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401 -- importing this initializes the CUDA context
import numpy as np
import tensorrt as trt
import torch

TRT_LOGGER = trt.Logger()


class HostDeviceMem(object):
    """Pairs a pinned host buffer with its matching device allocation."""

    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


def allocate_buffers(engine):
    """Allocate page-locked host buffers and device buffers for every engine
    binding, sized for the largest batch allowed by optimization profile 0."""
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    out_shapes = []
    input_shapes = []
    out_names = []
    # get_profile_shape returns (min, opt, max); take the batch dim of "max".
    max_batch_size = engine.get_profile_shape(0, 0)[2][0]
    for binding in engine:
        binding_shape = tuple(engine.get_binding_shape(binding))
        # A dynamic batch dimension is reported as -1; size the buffer for one
        # sample here and scale by max_batch_size below.
        if binding_shape[0] == -1:
            binding_shape = (1,) + binding_shape[1:]
        size = trt.volume(binding_shape) * max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Page-locked (pinned) host memory enables asynchronous H2D/D2H copies.
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        bindings.append(int(device_mem))
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
            input_shapes.append(engine.get_binding_shape(binding))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
            out_shapes.append(engine.get_binding_shape(binding))
            out_names.append(binding)
    return inputs, outputs, bindings, stream, input_shapes, out_shapes, out_names, max_batch_size


def do_inference(context, bindings, inputs, outputs, stream):
    """Run one inference pass: copy inputs to the GPU, execute, copy outputs back."""
    # Transfer input data to the device.
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # Run inference (execute_async_v2 is the explicit-batch entry point).
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back to the host.
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    # Wait for all queued work on the stream to finish.
    stream.synchronize()
    return [out.host for out in outputs]


class TrtModel(object):
    """Thin wrapper around a serialized TensorRT engine for repeated inference."""

    def __init__(self, model):
        self.engine_file = model
        self.engine = None
        self.inputs = None
        self.outputs = None
        self.bindings = None
        self.stream = None
        self.context = None
        self.input_shapes = None
        self.out_shapes = None
        self.out_names = None
        self.max_batch_size = 1

    def build(self):
        # Deserialize the engine from disk and allocate all I/O buffers once.
        with open(self.engine_file, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
            (self.inputs, self.outputs, self.bindings, self.stream,
             self.input_shapes, self.out_shapes, self.out_names,
             self.max_batch_size) = allocate_buffers(self.engine)
            self.context = self.engine.create_execution_context()
            # Select optimization profile 0, the one the buffers were sized for.
            self.context.active_optimization_profile = 0

    def run(self, batch_inputs, deflatten: bool = False, as_dict=False, use_token_type_ids=None):
        """Run inference on [input_ids, attention_mask(, token_type_ids)] arrays.

        If deflatten is True, the flat output buffers are reshaped to their
        binding shapes using the actual batch size.
        """
        # Lazily deserialize the engine on first use.
        if self.engine is None:
            self.build()
        batch_size = batch_inputs[0].shape[0]

        # Copy input_ids into the pinned host buffer (flattened, row-major).
        input_ids = np.array(batch_inputs[0], dtype=np.int32)
        allocate_place = np.prod(input_ids.shape)
        self.inputs[0].host[:allocate_place] = input_ids.flatten(order='C')

        attention_mask = np.array(batch_inputs[1], dtype=np.int32)
        allocate_place = np.prod(attention_mask.shape)
        self.inputs[1].host[:allocate_place] = attention_mask.flatten(order='C')

        # Tell the context the actual (dynamic) input shapes for this batch.
        self.context.set_binding_shape(0, input_ids.shape)
        self.context.set_binding_shape(1, attention_mask.shape)

        if use_token_type_ids:
            token_type_ids = np.array(batch_inputs[2], dtype=np.int32)
            allocate_place = np.prod(token_type_ids.shape)
            self.inputs[2].host[:allocate_place] = token_type_ids.flatten(order='C')
            self.context.set_binding_shape(2, token_type_ids.shape)

        trt_outputs = do_inference(
            self.context, bindings=self.bindings,
            inputs=self.inputs, outputs=self.outputs, stream=self.stream)

        if deflatten:
            # Reshape each flat output buffer to (batch_size, *binding_shape[1:]).
            out_shapes = [(batch_size,) + tuple(out_shape)[1:] for out_shape in self.out_shapes]
            trt_outputs = [output[:np.prod(shape)].reshape(shape)
                           for output, shape in zip(trt_outputs, out_shapes)]

        # Drop any rows beyond the actual batch size.
        trt_outputs = [trt_output[:batch_size] for trt_output in trt_outputs]
        return trt_outputs


def mean_pooling(token_embeddings, attention_mask):
    """Average token embeddings over the sequence, ignoring padding positions."""
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
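
# Illustrative shape check for mean_pooling (hypothetical values, not part of
# the pipeline): with embeddings of shape (2, 4, 3) and a mask of shape (2, 4),
# padding positions contribute zero and the result has shape (2, 3):
#   emb = torch.ones(2, 4, 3)
#   mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])
#   mean_pooling(emb, mask)  # -> tensor of shape (2, 3); all ones here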


def encode(sentences: list, tokenizer, trt_model: TrtModel, use_token_type_ids=False, max_length=512):
    """Tokenize sentences, run the TensorRT engine, and mean-pool to sentence embeddings."""
    sentences = [sentences] if isinstance(sentences, str) else sentences

    # Pad to a fixed length so the inputs match the engine's expected shape.
    x = tokenizer(sentences, padding='max_length', truncation=True, max_length=max_length)
    input_ids = np.array(x["input_ids"], dtype=np.int32)
    attention_mask = np.array(x["attention_mask"], dtype=np.int32)

    if use_token_type_ids:
        token_type_ids = np.array(x["token_type_ids"], dtype=np.int32)
        hidden_states = trt_model.run([input_ids, attention_mask, token_type_ids],
                                      deflatten=True, use_token_type_ids=True)
    else:
        hidden_states = trt_model.run([input_ids, attention_mask], deflatten=True)

    # hidden_states[0] is the last hidden state: (batch, seq_len, hidden).
    sentence_embeddings = mean_pooling(torch.from_numpy(hidden_states[0]),
                                       torch.from_numpy(attention_mask))
    return sentence_embeddings.numpy()
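

# NOTE: the serialized .engine file used below must be built beforehand from an
# ONNX export of the model, on the same GPU and TensorRT version that will run
# it. A typical trtexec invocation might look like the following (illustrative
# only -- input names and shape ranges depend on how the ONNX was exported):
#
#   trtexec --onnx=paraphrase-multilingual-MiniLM-L12-v2.onnx \
#           --saveEngine=paraphrase-multilingual-MiniLM-L12-v2.engine \
#           --minShapes=input_ids:1x512,attention_mask:1x512 \
#           --optShapes=input_ids:8x512,attention_mask:8x512 \
#           --maxShapes=input_ids:32x512,attention_mask:32x512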


if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("tensorRT/models/paraphrase-multilingual-MiniLM-L12-v2")
    model = TrtModel("tensorRT/models/paraphrase-multilingual-MiniLM-L12-v2.engine")

    lst_input = ["Pham Minh Chinh is Vietnam's Prime Minister"] * 2
    embeddings = encode(lst_input, tokenizer, model, use_token_type_ids=False)
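
    # Sanity check (illustrative, not part of the original flow): the two
    # identical sentences above should produce near-identical embeddings,
    # so their cosine similarity should be close to 1.0.
    a, b = embeddings[0], embeddings[1]
    cos_sim = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    print("embeddings shape:", embeddings.shape, "cosine similarity:", cos_sim)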