Update backPrompt.py
backPrompt.py  +1 -9
@@ -26,16 +26,8 @@ def load_image(image_file):
     return pixel_values


-def main(image_path):
+def main(image_path,model,tokenizer):
     path = "OpenGVLab/InternVL2_5-4B"
-    model = AutoModel.from_pretrained(
-        path,
-        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
-        # load_in_8bit=True,
-        low_cpu_mem_usage=True,
-        use_flash_attn=True,
-        trust_remote_code=True).eval()
-    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
     pixel_values = load_image(image_path).to(torch.float32).to("cpu")
     generation_config = dict(max_new_tokens=1024, do_sample=True)

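With this change, main() no longer builds the model and tokenizer itself; the caller is expected to construct them once and pass them in. Below is a minimal caller-side sketch that reuses the loading arguments this commit removed from main(). The import of main from backPrompt and the example image path are assumptions for illustration, not part of the commit.

# Caller-side sketch (assumed usage): load the model and tokenizer once,
# then reuse them across calls to main().
import torch
from transformers import AutoModel, AutoTokenizer

from backPrompt import main  # assumes backPrompt.py is importable from here

path = "OpenGVLab/InternVL2_5-4B"
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True).eval()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)

# The model and tokenizer are created once here instead of on every main() call.
main("example.jpg", model, tokenizer)  # "example.jpg" is a hypothetical image path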