Update app.py
app.py CHANGED
@@ -30,7 +30,7 @@ phi_base_model = AutoModelForCausalLM.from_pretrained(
 from peft import PeftModel
 phi_new_model = "models/phi_adapter"
 phi_model = PeftModel.from_pretrained(phi_base_model, phi_new_model)
-phi_model = phi_model.merge_and_unload()
+phi_model = phi_model.merge_and_unload().to(device)
 
 '''compute_type = 'float32'
 if device != 'cpu':
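
Note: merge_and_unload() folds the LoRA adapter into the base weights and returns a plain transformers model, but it does not move the weights anywhere, so without the .to(device) added here the merged model stays wherever the base model was loaded (typically CPU) while later calls feed it tensors on the GPU. A minimal sketch of the pattern, assuming a device variable like the one defined earlier in app.py; the checkpoint name "microsoft/phi-2" is a placeholder, not necessarily the app's actual base model:

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Assumed to match the `device` variable defined earlier in app.py.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Placeholder checkpoint; app.py loads its own phi_base_model around line 30.
phi_base_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")

# Fold the adapter into the base weights, then move the merged model explicitly.
phi_model = PeftModel.from_pretrained(phi_base_model, "models/phi_adapter")
phi_model = phi_model.merge_and_unload().to(device)
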
@@ -134,9 +134,9 @@ def bot(history):
     else:
         if context_type == 'image':
             query_ids = tokenizer.encode(query)
-            query_ids = torch.tensor(query_ids, dtype=torch.int32).unsqueeze(0)
+            query_ids = torch.tensor(query_ids, dtype=torch.int32).unsqueeze(0).to(device)
             query_embeds = phi_model.get_input_embeddings()(query_ids)
-            inputs_embeds = torch.cat([context, query_embeds], dim=1)
+            inputs_embeds = torch.cat([context.to(device), query_embeds], dim=1)
             out = phi_model.generate(inputs_embeds=inputs_embeds, min_new_tokens=10, max_new_tokens=50,
                                      bos_token_id=tokenizer.bos_token_id)
             response = tokenizer.decode(out[0], skip_special_tokens=True)
@@ -144,7 +144,7 @@ def bot(history):
             input_text = context + query
 
             input_tokens = tokenizer.encode(input_text)
-            input_ids = torch.tensor(input_tokens, dtype=torch.int32).unsqueeze(0)
+            input_ids = torch.tensor(input_tokens, dtype=torch.int32).unsqueeze(0).to(device)
             inputs_embeds = phi_model.get_input_embeddings()(input_ids)
             out = phi_model.generate(inputs_embeds=inputs_embeds, min_new_tokens=10, max_new_tokens=50,
                                      bos_token_id=tokenizer.bos_token_id)
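
Both hunks in bot() fix the same class of bug: torch.tensor() allocates on the CPU by default, so on a GPU device the embedding lookup and the torch.cat with the cached context tensor would raise a device-mismatch error. PyTorch embedding layers accept int32 or int64 indices, but the indices must live on the same device as the weights. A small sketch of the repeated pattern; embed_on_device is a hypothetical helper name, not something in app.py:

import torch

def embed_on_device(tokenizer, model, text, device):
    # torch.tensor() places the ids on the CPU by default, so move them
    # to the model's device before the embedding lookup.
    ids = torch.tensor(tokenizer.encode(text), dtype=torch.int32).unsqueeze(0).to(device)
    return model.get_input_embeddings()(ids)
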
@@ -214,12 +214,12 @@ with gr.Blocks() as app:
     chatbot.like(print_like_dislike, None, None)
     clear.click(clear_fn, None, chatbot, queue=False)
 
-    aud.
+    aud.change(audio_file, [chatbot, aud], [chatbot], queue=False).then(
         bot, chatbot, chatbot, api_name="bot_response"
     )
-    aud.upload(audio_file, [chatbot, aud], [chatbot], queue=False).then(
+    '''aud.upload(audio_file, [chatbot, aud], [chatbot], queue=False).then(
         bot, chatbot, chatbot, api_name="bot_response"
-    )
+    )'''
 
 app.queue()
 app.launch()
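
The last hunk rewires the audio handler from aud.upload to aud.change, keeping the old aud.upload chain commented out in a triple-quoted string. In Gradio, .upload() fires only when a file is uploaded, while .change() fires whenever the component's value changes, which also covers a new microphone recording; presumably that is the motivation here. A self-contained sketch of the wiring, with audio_file and bot as stubs standing in for the app's real callbacks:

import gradio as gr

def audio_file(history, audio_path):
    # Stub for the app's transcription callback.
    return history + [(f"[audio: {audio_path}]", None)]

def bot(history):
    # Stub for the app's generation callback.
    history[-1] = (history[-1][0], "(response)")
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    aud = gr.Audio(type="filepath")

    # .change() fires on any new value (recording or upload);
    # .upload() would fire only for uploaded files.
    aud.change(audio_file, [chatbot, aud], [chatbot], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )

demo.queue()
demo.launch()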