Update app.py
app.py
CHANGED
@@ -23,19 +23,19 @@ def transcribe(audio):
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
 def client_fn(model):
-    if "Mixtral" in model:
-        return InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+    if "Llama 3B Service" in model:
+        return OpenAI(
+            base_url="http://host:60002/v1",
+            api_key="token-abc123"
+        )
     elif "Llama" in model:
         return InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
     elif "Mistral" in model:
         return InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
     elif "Phi" in model:
         return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
-    elif "Llama 3B" in model:
-        return OpenAI(
-            base_url="http://52.76.81.56:60002/v1",
-            api_key="token-abc123"
-        )
+    elif "Mixtral" in model:
+        return InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
     else:
         return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
 
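Note on the reordering above: `client_fn` matches the dropdown label by substring, and the generic `"Llama"` branch would otherwise shadow the new service branch, since `"Llama"` is contained in `"Llama 3B Service"`. A two-line check illustrates this:

```python
# Substring matching is why "Llama 3B Service" must be tested first:
print("Llama" in "Llama 3B Service")    # True -> the generic branch would win
print("Mixtral" in "Llama 3B Service")  # False
```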
@@ -44,7 +44,7 @@ def randomize_seed_fn(seed: int) -> int:
     return seed
 
 system_instructions1 = """
-[SYSTEM] Answer as Real
+[SYSTEM] Answer as Real Jarvis JARVIS, Created by Jaward.
 Keep conversation friendly, short, clear, and concise.
 Avoid unnecessary introductions and answer the user's questions directly.
 Respond in a normal, conversational manner while being friendly and helpful.
@@ -57,7 +57,7 @@ def models(text, model="Mixtral 8x7B", seed=42):
 
     client = client_fn(model)
 
-    if "Llama 3B" in model:
+    if "Llama 3B Service" in model:
         messages = [
             {"role": "system", "content": system_instructions1},
             {"role": "user", "content": text}
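For the service branch, these `messages` would presumably go through the OpenAI-style chat API rather than `text_generation`. A minimal sketch of that call, assuming the `openai` v1 client; the served model id and the streaming loop are assumptions, not shown in this diff:

```python
from openai import OpenAI

# Values mirrored from the diff; the endpoint and token are placeholders.
client = OpenAI(base_url="http://host:60002/v1", api_key="token-abc123")

system_instructions1 = "[SYSTEM] Answer as Real Jarvis JARVIS, Created by Jaward."
text = "What time is it?"

stream = client.chat.completions.create(
    model="llama-3b",  # hypothetical served-model id, not in the commit
    messages=[
        {"role": "system", "content": system_instructions1},
        {"role": "user", "content": text},
    ],
    stream=True,
)
output = ""
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        output += delta
print(output)
```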
@@ -72,7 +72,7 @@ def models(text, model="Mixtral 8x7B", seed=42):
         max_new_tokens=300,
         seed=seed
     )
-    formatted_prompt = system_instructions1 + text + "[
+    formatted_prompt = system_instructions1 + text + "[JARVIS]"
     stream = client.text_generation(
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
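The Hugging Face-hosted models take the prompt-string path instead, streaming tokens from `InferenceClient.text_generation`. A self-contained sketch of that loop (the `huggingface_hub` streaming API is real; the prompt and kwargs mirror the diff):

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
generate_kwargs = dict(max_new_tokens=300, seed=42)

# With stream=True and details=True, each streamed item exposes .token.text.
formatted_prompt = "[SYSTEM] ...instructions... " + "What time is it?" + "[JARVIS]"
stream = client.text_generation(
    formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
output = ""
for response in stream:
    output += response.token.text
print(output)
```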
@@ -84,26 +84,24 @@ def models(text, model="Mixtral 8x7B", seed=42):
 async def respond(audio, model, seed):
     user = transcribe(audio)
     reply = models(user, model, seed)
-    communicate = edge_tts.Communicate(reply)
+    # Change the voice to a deep male voice
+    communicate = edge_tts.Communicate(reply, voice="en-US-GuyNeural")
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
         await communicate.save(tmp_path)
         yield tmp_path
 
-DESCRIPTION = """ # <center><b>
-### <center>A personal Assistant of Jaward for YOU
-### <center>Voice Chat with your personal Assistant</center>
-"""
+DESCRIPTION = """ # <center><b>Hello, I am JARVIS your personal AI voice assistant</b></center>"""
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         select = gr.Dropdown([
+            'Llama 3B Service',
             'Mixtral 8x7B',
             'Llama 3 8B',
             'Mistral 7B v0.3',
             'Phi 3 mini',
-            'Llama 3B'
         ],
         value="Mistral 7B v0.3",
         label="Model"
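`edge_tts.Communicate(text, voice=...)` and its async `save` are the real edge-tts API; this commit only switches the voice. A runnable sketch of the synthesis step in isolation (one caveat: edge-tts produces MP3 data by default, so the `.wav` suffix used by the app is cosmetic):

```python
import asyncio
import tempfile

import edge_tts

async def speak(text: str) -> str:
    # Same deep male voice as the commit.
    communicate = edge_tts.Communicate(text, voice="en-US-GuyNeural")
    # The suffix mirrors the app code; edge-tts still writes MP3-encoded audio.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
    await communicate.save(tmp_path)
    return tmp_path

if __name__ == "__main__":
    print(asyncio.run(speak("Hello, I am JARVIS, your personal AI voice assistant.")))
```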
|