meraj12 commited on
Commit
5e80325
·
verified ·
1 Parent(s): e1681aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -5
app.py CHANGED
@@ -22,18 +22,30 @@ if "clone_path" not in st.session_state:
22
  st.session_state.clone_path = None
23
  if "voice_gender" not in st.session_state:
24
  st.session_state.voice_gender = "Male" # Default to Male
 
 
 
 
 
 
 
 
25
 
26
  # Sidebar for voice cloning setup
27
  st.sidebar.header("🧬 Setup Your Clone Voice")
28
  voice_option = st.sidebar.radio("Choose how to provide clone voice", ["Upload Voice"])
29
  st.session_state.voice_gender = st.sidebar.selectbox("Select Voice Gender", ["Male", "Female"])
 
 
 
 
30
 
31
  if voice_option == "Upload Voice":
32
  uploaded = st.sidebar.file_uploader("Upload a voice sample", type=["wav", "mp3", "m4a", "flac", "ogg"])
33
  if uploaded:
34
  path = save_uploaded_audio(uploaded, "reference_voice.wav")
35
  st.session_state.clone_path = path
36
- st.success("✅ Voice uploaded and saved as your clone voice.")
37
 
38
  # --- Conversation section ---
39
  st.subheader("🗣️ Ask with voice or type text below")
@@ -55,9 +67,10 @@ with tab1:
55
 
56
  # Step 2: Get LLM response
57
  st.info("Thinking...")
 
58
  response = groq_client.chat.completions.create(
59
  model="llama3-8b-8192",
60
- messages=[{"role": "user", "content": user_text}]
61
  )
62
  reply = response.choices[0].message.content
63
  st.success(f"🤖 AI says: {reply}")
@@ -65,7 +78,7 @@ with tab1:
65
  # Step 3: Voice reply
66
  if st.session_state.clone_path:
67
  st.info("Cloning voice reply...")
68
- voice_preset = get_voice_preset(st.session_state.voice_gender)
69
  voice_output_path = clone_and_generate_text(reply, st.session_state.clone_path, voice_preset)
70
  st.audio(voice_output_path)
71
  else:
@@ -80,9 +93,10 @@ with tab2:
80
  else:
81
  # Step 1: Get LLM response
82
  st.info("Thinking...")
 
83
  response = groq_client.chat.completions.create(
84
  model="llama3-8b-8192",
85
- messages=[{"role": "user", "content": user_input}]
86
  )
87
  reply = response.choices[0].message.content
88
  st.success(f"🤖 AI says: {reply}")
@@ -90,7 +104,7 @@ with tab2:
90
  # Step 2: Voice reply
91
  if st.session_state.clone_path:
92
  st.info("Cloning voice reply...")
93
- voice_preset = get_voice_preset(st.session_state.voice_gender)
94
  voice_output_path = clone_and_generate_text(reply, st.session_state.clone_path, voice_preset)
95
  st.audio(voice_output_path)
96
  else:
 
22
  st.session_state.clone_path = None
23
  if "voice_gender" not in st.session_state:
24
  st.session_state.voice_gender = "Male" # Default to Male
25
+ if "voice_name" not in st.session_state:
26
+ st.session_state.voice_name = "MyVoice"
27
+ if "emotion" not in st.session_state:
28
+ st.session_state.emotion = "Neutral"
29
+ if "language" not in st.session_state:
30
+ st.session_state.language = "English"
31
+ if "ai_persona" not in st.session_state:
32
+ st.session_state.ai_persona = "Assistant"
33
 
34
  # Sidebar for voice cloning setup
35
  st.sidebar.header("🧬 Setup Your Clone Voice")
36
  voice_option = st.sidebar.radio("Choose how to provide clone voice", ["Upload Voice"])
37
  st.session_state.voice_gender = st.sidebar.selectbox("Select Voice Gender", ["Male", "Female"])
38
+ st.session_state.voice_name = st.sidebar.text_input("Name your voice", value=st.session_state.voice_name)
39
+ st.session_state.emotion = st.sidebar.selectbox("Select Emotion", ["Neutral", "Happy", "Sad", "Angry", "Excited", "Calm"])
40
+ st.session_state.language = st.sidebar.selectbox("Select Language", ["English", "Urdu", "Hindi", "Arabic", "Spanish"])
41
+ st.session_state.ai_persona = st.sidebar.selectbox("Select AI Personality", ["Assistant", "Urdu Teacher", "Wise Mentor", "Chill Friend", "Formal Assistant"])
42
 
43
  if voice_option == "Upload Voice":
44
  uploaded = st.sidebar.file_uploader("Upload a voice sample", type=["wav", "mp3", "m4a", "flac", "ogg"])
45
  if uploaded:
46
  path = save_uploaded_audio(uploaded, "reference_voice.wav")
47
  st.session_state.clone_path = path
48
+ st.success(f"✅ Voice '{st.session_state.voice_name}' uploaded and saved as your clone voice.")
49
 
50
  # --- Conversation section ---
51
  st.subheader("🗣️ Ask with voice or type text below")
 
67
 
68
  # Step 2: Get LLM response
69
  st.info("Thinking...")
70
+ persona_prompt = f"You are a {st.session_state.ai_persona}. Respond in {st.session_state.language} with a {st.session_state.emotion} tone."
71
  response = groq_client.chat.completions.create(
72
  model="llama3-8b-8192",
73
+ messages=[{"role": "system", "content": persona_prompt}, {"role": "user", "content": user_text}]
74
  )
75
  reply = response.choices[0].message.content
76
  st.success(f"🤖 AI says: {reply}")
 
78
  # Step 3: Voice reply
79
  if st.session_state.clone_path:
80
  st.info("Cloning voice reply...")
81
+ voice_preset = get_voice_preset(st.session_state.voice_gender, st.session_state.emotion)
82
  voice_output_path = clone_and_generate_text(reply, st.session_state.clone_path, voice_preset)
83
  st.audio(voice_output_path)
84
  else:
 
93
  else:
94
  # Step 1: Get LLM response
95
  st.info("Thinking...")
96
+ persona_prompt = f"You are a {st.session_state.ai_persona}. Respond in {st.session_state.language} with a {st.session_state.emotion} tone."
97
  response = groq_client.chat.completions.create(
98
  model="llama3-8b-8192",
99
+ messages=[{"role": "system", "content": persona_prompt}, {"role": "user", "content": user_input}]
100
  )
101
  reply = response.choices[0].message.content
102
  st.success(f"🤖 AI says: {reply}")
 
104
  # Step 2: Voice reply
105
  if st.session_state.clone_path:
106
  st.info("Cloning voice reply...")
107
+ voice_preset = get_voice_preset(st.session_state.voice_gender, st.session_state.emotion)
108
  voice_output_path = clone_and_generate_text(reply, st.session_state.clone_path, voice_preset)
109
  st.audio(voice_output_path)
110
  else: