Update modules/app.py
Browse files- modules/app.py +71 -98
modules/app.py
CHANGED
@@ -1,118 +1,91 @@
|
|
1 |
'''
|
2 |
Created By Lewis Kamau Kimaru
|
3 |
-
|
|
|
4 |
August 2023
|
5 |
'''
|
6 |
|
7 |
-
from
|
8 |
-
from
|
9 |
-
|
10 |
-
import gradio as gr
|
11 |
-
import ctranslate2
|
12 |
-
import sentencepiece as spm
|
13 |
-
import fasttext
|
14 |
-
import uvicorn
|
15 |
from pyngrok import ngrok
|
|
|
|
|
16 |
import os
|
17 |
-
#import nest_asyncio
|
18 |
|
19 |
-
app =
|
20 |
|
21 |
# Set your ngrok authtoken
|
22 |
-
|
23 |
#ngrok.set_auth_token("2S6xeFEoSVFWr2egtDRcqgeUtSx_2juefHFkEW6nGbpRHS37W")
|
24 |
#ngrok.set_auth_token("2UAmdjHdAFV9x84TdyEknIfNhYk_4Ye8n4YK7ZhfCMob3yPBh")
|
25 |
#ngrok.set_auth_token("2UAqm26HuWiWvQjzK58xYufSGpy_6tStKSyLLyR9f7pcezh6R")
|
26 |
#ngrok.set_auth_token("2UGQqzZoI3bx7SSk8H4wuFC3iaC_2WniWyNAsW5fd2rFyKVq1")
|
27 |
-
ngrok.set_auth_token("2UISOtStHwytO70NQK38dFhS1at_5opQaXnoQCKeyhEe4qfT2")
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
# Translate the source sentences
|
73 |
-
translations = translator.translate_batch(
|
74 |
-
source_sents_subworded,
|
75 |
-
batch_type="tokens",
|
76 |
-
max_batch_size=2024,
|
77 |
-
beam_size=beam_size,
|
78 |
-
target_prefix=target_prefix,
|
79 |
)
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
async def translate_endpoint(request: Request):
|
97 |
-
data = await request.json()
|
98 |
-
userinput = data.get("userinput")
|
99 |
-
target_lang = data.get("target_lang")
|
100 |
-
print(f"\n Target Language; {target_lang}, User Input: {userinput}\n")
|
101 |
-
|
102 |
-
if not userinput or not target_lang:
|
103 |
-
raise HTTPException(status_code=422, detail="Both 'userinput' and 'target_lang' are required.")
|
104 |
-
|
105 |
-
source_lang, translated_text = translate_text(userinput, target_lang)
|
106 |
-
print(f"\nsource_language: {source_lang}, Translated Text: {translated_text}\n\n")
|
107 |
-
return {
|
108 |
-
"source_language": source_lang,
|
109 |
-
"translated_text": translated_text[0],
|
110 |
-
}
|
111 |
|
|
|
112 |
ngrok_tunnel = ngrok.connect(7860)
|
113 |
public_url = ngrok_tunnel.public_url
|
114 |
print('\nPublic URL✅:', public_url)
|
115 |
-
#nest_asyncio.apply()
|
116 |
|
117 |
-
print("\
|
118 |
-
#uvicorn.run(app, port=7860)
|
|
|
1 |
'''
|
2 |
Created By Lewis Kamau Kimaru
|
3 |
+
https://towardsdev.com/building-a-voice-assistant-using-openai-api-and-flask-by-chatgpt-9f90a430b242
|
4 |
+
https://github.com/prathmeshChaudhari05/Voice-Assistant-Flask
|
5 |
August 2023
|
6 |
'''
|
7 |
|
8 |
+
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
|
9 |
+
from playsound import playsound
|
10 |
+
import speech_recognition as sr
|
|
|
|
|
|
|
|
|
|
|
11 |
from pyngrok import ngrok
|
12 |
+
from gtts import gTTS
|
13 |
+
import openai
|
14 |
import os
|
|
|
15 |
|
16 |
+
# --- Application setup -------------------------------------------------------
app = Flask(__name__)

# Set the ngrok authtoken.
# SECURITY FIX(review): the original committed several live ngrok auth tokens
# (one active, four commented out).  Committed tokens must be treated as
# leaked and revoked.  Read the token from the environment instead; the
# literal default preserves the original behaviour when the variable is
# unset, so existing deployments keep working.
ngrok.set_auth_token(
    os.environ.get(
        "NGROK_AUTH_TOKEN",
        "2UAhCqf5zP0cCgJzeadNANkbIqx_7ZJvhkDSNWccqMX2hyxXP",
    )
)

# Set up OpenAI API credentials.
# SECURITY NOTE(review): never commit a real key here — supply it via the
# OPENAI_API_KEY environment variable.  The placeholder default matches the
# original source, so behaviour is unchanged until a real key is provided.
openai.api_key = os.environ.get("OPENAI_API_KEY", "YOUR_API_KEY")
|
28 |
+
|
29 |
+
@app.route('/')
def home():
    """Serve the landing page.

    Returns a small HTML document with a button that POSTs back to '/'
    (triggering the voice assistant) and an <audio> element pointing at the
    generated reply under ``static/response.mp3``.
    """
    # BUG FIX: the original returned the raw string, so the Jinja
    # "{{ url_for(...) }}" placeholder was sent to the browser verbatim —
    # plain strings returned from a Flask view are never template-rendered.
    # Resolve the static URL explicitly instead (url_for is already imported
    # at module level and works inside a request context).
    audio_src = url_for('static', filename='response.mp3')
    return f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Voice Assistant</title>
    </head>
    <body>
        <h1>Voice Assistant</h1>
        <form method="POST">
            <button type="submit">Ask me something!</button>
        </form>
        <audio controls>
            <source src="{audio_src}" type="audio/mpeg">
        </audio>
    </body>
    </html>
    """
|
49 |
+
|
50 |
+
|
51 |
+
def handle_form():
    """Listen on the microphone, ask OpenAI for a reply, and speak it aloud.

    Records one utterance, transcribes it with Google Speech Recognition,
    completes the transcript with the OpenAI "davinci" engine, converts the
    reply to speech with gTTS, and plays it on the server's speakers.

    Returns a redirect back to '/' so the POST view that delegates here
    always hands Flask a valid response.  BUG FIX: the original returned
    None on every path, which makes Flask raise "view function did not
    return a valid response".
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        audio = recognizer.listen(source)

    try:
        # BUG FIX: recognize once and reuse the transcript — the original
        # called recognize_google() twice, issuing two network requests for
        # the same audio.
        result = recognizer.recognize_google(audio)
        print("result2:")
        print(result)
        response = openai.Completion.create(
            engine="davinci",
            prompt=result,
            max_tokens=60,
            n=1,
            stop=None,
            temperature=0.5,
        )
        if response.choices:
            tts = gTTS(text=response.choices[0].text, lang='en')
        else:
            tts = gTTS(text="I'm sorry, I didn't understand what you said", lang='en')
        # NOTE(review): the MP3 is written to the working directory and
        # deleted right after local playback, so the landing page's
        # static/response.mp3 <audio> source never sees it — confirm whether
        # it should be saved under static/ and kept instead.
        filename = 'response.mp3'
        tts.save(filename)
        playsound(filename)
        os.remove(filename)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    # Always send the browser back to the landing page.
    return redirect(url_for('home'))
|
81 |
+
|
82 |
+
@app.route('/', methods=['POST'])
def submit_textarea():
    """Handle the landing-page button press by delegating to handle_form()."""
    outcome = handle_form()
    return outcome
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
|
86 |
+
|
87 |
# Open an ngrok tunnel to the local Flask port and announce the public URL.
ngrok_tunnel = ngrok.connect(7860)
public_url = ngrok_tunnel.public_url
print(f"\nPublic URL✅: {public_url}")

print("\nFlask APP starting .......\n")
|
|