Update app.py
app.py (CHANGED)
@@ -122,14 +122,14 @@ def generate_caption_llava(image_path, caption_bool):
 
     output = model.generate(**inputs, max_new_tokens=100)
 
-    return processor.decode(output[0], skip_special_tokens=True)[len(
+    return processor.decode(output[0], skip_special_tokens=True)[len(text_prompt)-1:]
 
 @spaces.GPU
 def generate_answer_llava(image_path, question):
     text_prompt =f"[INST] <image>\n{question} [/INST]"
     inputs = processor(text_prompt, Image.open(image_path), return_tensors="pt").to(device)
     output = model.generate(**inputs, max_new_tokens=100)
-    return processor.decode(output[0], skip_special_tokens=True)[len(text_prompt-1
+    return processor.decode(output[0], skip_special_tokens=True)[len(text_prompt)-1:]
 
 
 
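For context, both corrected return statements complete the usual decode-then-slice pattern for LLaVA-style chat models: the decoded sequence still begins with the text prompt, so roughly the prompt's length is sliced off the front before the answer is returned. Below is a minimal, self-contained sketch of the fixed generate_answer_llava; the checkpoint name, the LLaVA-NeXT classes, and the model/processor/device setup are assumptions (the real app.py defines them outside this hunk), and the Spaces-specific @spaces.GPU decorator is omitted so the sketch runs standalone.

import torch
from PIL import Image
from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor

# Assumed setup -- the actual Space configures model, processor, and device elsewhere in app.py.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
model_id = "llava-hf/llava-v1.6-mistral-7b-hf"  # assumed checkpoint
processor = LlavaNextProcessor.from_pretrained(model_id)
model = LlavaNextForConditionalGeneration.from_pretrained(model_id, torch_dtype=dtype).to(device)

def generate_answer_llava(image_path, question):
    # Mistral-style instruction prompt with an image placeholder.
    text_prompt = f"[INST] <image>\n{question} [/INST]"
    inputs = processor(
        text=text_prompt, images=Image.open(image_path), return_tensors="pt"
    ).to(device)
    output = model.generate(**inputs, max_new_tokens=100)
    # Decode the whole sequence, then drop roughly the length of the prompt
    # from the front, mirroring the slice introduced in this commit.
    decoded = processor.decode(output[0], skip_special_tokens=True)
    return decoded[len(text_prompt) - 1:]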