# Provenance: Hugging Face Space file "voice_component.py",
# uploaded by vikramronavrsc (revision 6d9e7a8, ~11 kB).
# voice_component.py
import streamlit as st
import streamlit.components.v1 as components
def voice_input_component(lang: str = "en-US",
                          height: int = 250,
                          key: str = "voice_input") -> str:
    """
    Render a browser-based voice input widget (Web Speech API) with
    animated mic button, live transcript display, and a "Send" button.

    Parameters
    ----------
    lang : str
        BCP-47 language tag passed to ``SpeechRecognition.lang``
        (default ``"en-US"``).
    height : int
        Pixel height of the embedded component iframe (default 250).
    key : str
        Session-state key under which the transcript is expected.

    Returns
    -------
    str
        The value stored in ``st.session_state[key]`` (empty string if
        nothing has been stored).

    NOTE(review): ``st.components.v1.html`` renders a *static* component
    with no return channel, so the ``streamlit:setComponentValue``
    postMessage emitted by the embedded JS is not received by Streamlit.
    Unless some other code writes to ``st.session_state[key]``, this
    function always returns "".  A bidirectional custom component
    (``components.declare_component``) is needed for a real round trip —
    confirm against the calling app.
    """
    # HTML/CSS/JS for the widget.  __RECOG_LANG__ is substituted below so
    # the recognition language stays configurable from Python.
    voice_html = """
    <div class="voice-input-container">
        <button id="voice-button" class="voice-button">
            <div class="mic-icon">
                <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M12 2a3 3 0 0 0-3 3v7a3 3 0 0 0 6 0V5a3 3 0 0 0-3-3Z"></path><path d="M19 10v2a7 7 0 0 1-14 0v-2"></path><line x1="12" y1="19" x2="12" y2="22"></line></svg>
            </div>
            <div class="ripple-container">
                <div class="ripple"></div>
            </div>
        </button>
        <div id="voice-status" class="voice-status">Ask a question</div>
        <div id="transcript-container" class="transcript-container" style="display:none;">
            <div id="transcript" class="transcript"></div>
            <button id="send-transcript" class="send-button">
                <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><line x1="22" y1="2" x2="11" y2="13"></line><polygon points="22 2 15 22 11 13 2 9 22 2"></polygon></svg>
                Send
            </button>
        </div>
    </div>
    <style>
    .voice-input-container {
        display: flex;
        flex-direction: column;
        align-items: center;
        justify-content: center;
        padding: 15px;
        margin-bottom: 15px;
    }
    .voice-button {
        position: relative;
        width: 60px;
        height: 60px;
        border-radius: 50%;
        background-color: #ff4b4b;
        color: white;
        border: none;
        cursor: pointer;
        display: flex;
        align-items: center;
        justify-content: center;
        transition: all 0.3s ease;
        box-shadow: 0 4px 10px rgba(255, 75, 75, 0.3);
    }
    .voice-button:hover {
        transform: scale(1.05);
        box-shadow: 0 6px 15px rgba(255, 75, 75, 0.4);
    }
    .voice-button.listening {
        background-color: #ff7a7a;
    }
    .mic-icon {
        z-index: 2;
        transition: all 0.3s ease;
    }
    .voice-button.listening .mic-icon {
        animation: pulse 1.5s infinite;
    }
    .ripple-container {
        position: absolute;
        width: 100%;
        height: 100%;
        border-radius: 50%;
        z-index: 1;
        display: flex;
        align-items: center;
        justify-content: center;
        overflow: hidden;
    }
    .ripple {
        position: absolute;
        width: 100%;
        height: 100%;
        border-radius: 50%;
        background-color: rgba(255, 255, 255, 0.3);
        transform: scale(0);
        opacity: 1;
    }
    .voice-button.listening .ripple {
        animation: ripple 2s linear infinite;
    }
    @keyframes ripple {
        0% {
            transform: scale(0);
            opacity: 1;
        }
        50% {
            transform: scale(1.5);
            opacity: 0.3;
        }
        100% {
            transform: scale(2);
            opacity: 0;
        }
    }
    @keyframes pulse {
        0% { transform: scale(1); }
        50% { transform: scale(1.1); }
        100% { transform: scale(1); }
    }
    .voice-status {
        margin-top: 15px;
        font-size: 16px;
        color: #6c757d;
        font-weight: 500;
        transition: all 0.3s ease;
    }
    .transcript-container {
        width: 100%;
        max-width: 600px;
        margin-top: 20px;
        display: flex;
        flex-direction: column;
        animation: fadeIn 0.3s ease;
    }
    .transcript {
        padding: 15px;
        border-radius: 8px;
        border: 1px solid #dee2e6;
        background-color: #f8f9fa;
        font-size: 16px;
        margin-bottom: 10px;
        min-height: 60px;
        width: 100%;
    }
    .send-button {
        display: flex;
        align-items: center;
        justify-content: center;
        gap: 8px;
        align-self: flex-end;
        background-color: #4CAF50;
        color: white;
        border: none;
        padding: 8px 16px;
        border-radius: 20px;
        cursor: pointer;
        font-weight: 500;
        transition: all 0.3s ease;
        box-shadow: 0 2px 5px rgba(76, 175, 80, 0.3);
    }
    .send-button:hover {
        background-color: #45a049;
        transform: translateY(-2px);
        box-shadow: 0 4px 8px rgba(76, 175, 80, 0.4);
    }
    @keyframes fadeIn {
        from { opacity: 0; transform: translateY(10px); }
        to { opacity: 1; transform: translateY(0); }
    }
    </style>
    <script>
    // Variable to store transcript
    let finalTranscript = '';
    let recognizing = false;
    let recognition;

    // Check if browser supports speech recognition
    function checkSpeechRecognitionSupport() {
        return 'webkitSpeechRecognition' in window || 'SpeechRecognition' in window;
    }

    // Initialize speech recognition
    function initSpeechRecognition() {
        if (!checkSpeechRecognitionSupport()) {
            document.getElementById('voice-status').textContent = 'Speech recognition not supported in this browser';
            document.getElementById('voice-button').disabled = true;
            return;
        }
        // Create speech recognition object
        recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
        // Configure settings
        recognition.continuous = true;
        recognition.interimResults = true;
        recognition.lang = '__RECOG_LANG__';  // substituted from the Python `lang` parameter
        // Handle results: accumulate final text, show interim text live
        recognition.onresult = function(event) {
            let interimTranscript = '';
            for (let i = event.resultIndex; i < event.results.length; ++i) {
                if (event.results[i].isFinal) {
                    finalTranscript += event.results[i][0].transcript;
                } else {
                    interimTranscript += event.results[i][0].transcript;
                }
            }
            const transcriptElement = document.getElementById('transcript');
            transcriptElement.textContent = finalTranscript + interimTranscript;
            // Show transcript container if we have text
            if ((finalTranscript + interimTranscript).trim() !== '') {
                document.getElementById('transcript-container').style.display = 'flex';
            }
            // Send transcript to Streamlit component
            window.parent.postMessage({
                type: 'voice-transcript',
                transcript: finalTranscript
            }, '*');
        };
        // Handle errors
        recognition.onerror = function(event) {
            if (event.error === 'no-speech') {
                document.getElementById('voice-status').textContent = 'No speech detected. Try again.';
                document.getElementById('voice-status').style.color = '#dc3545';
            } else {
                document.getElementById('voice-status').textContent = 'Error: ' + event.error;
                document.getElementById('voice-status').style.color = '#dc3545';
                console.error('Speech recognition error:', event.error);
            }
            stopRecognition();
        };
        // Handle end of speech recognition
        recognition.onend = function() {
            stopRecognition();
        };
    }

    // Start recognition
    function startRecognition() {
        if (recognizing) return;
        try {
            recognition.start();
            recognizing = true;
            finalTranscript = '';
            document.getElementById('voice-status').textContent = 'Listening...';
            document.getElementById('voice-status').style.color = '#4CAF50';
            document.getElementById('voice-button').classList.add('listening');
            document.getElementById('transcript-container').style.display = 'none';
            document.getElementById('transcript').textContent = '';
        } catch (e) {
            console.error('Error starting speech recognition:', e);
            document.getElementById('voice-status').textContent = 'Error starting speech recognition';
            document.getElementById('voice-status').style.color = '#dc3545';
        }
    }

    // Stop recognition and reset UI state
    function stopRecognition() {
        if (!recognizing) return;
        recognition.stop();
        recognizing = false;
        document.getElementById('voice-status').textContent = 'Ask a question';
        document.getElementById('voice-status').style.color = '#6c757d';
        document.getElementById('voice-button').classList.remove('listening');
    }

    // Toggle recognition
    function toggleRecognition() {
        if (recognizing) {
            stopRecognition();
        } else {
            startRecognition();
        }
    }

    // Send transcript to Streamlit
    function sendTranscript() {
        if (finalTranscript.trim() === '') return;
        window.parent.postMessage({
            type: 'streamlit:setComponentValue',
            value: finalTranscript,
            dataType: 'str'
        }, '*');
        // Reset UI
        finalTranscript = '';
        document.getElementById('transcript').textContent = '';
        document.getElementById('transcript-container').style.display = 'none';
        document.getElementById('voice-status').textContent = 'Question sent!';
        document.getElementById('voice-status').style.color = '#4CAF50';
        // Reset to default state after a delay
        setTimeout(() => {
            document.getElementById('voice-status').textContent = 'Ask a question';
            document.getElementById('voice-status').style.color = '#6c757d';
        }, 2000);
    }

    // Initialize after DOM loaded
    document.addEventListener('DOMContentLoaded', function() {
        // Initialize speech recognition
        initSpeechRecognition();
        // Add event listeners
        document.getElementById('voice-button').addEventListener('click', toggleRecognition);
        document.getElementById('send-transcript').addEventListener('click', sendTranscript);
    });
    </script>
    """
    # Inject the configurable recognition language.  A placeholder +
    # str.replace is used instead of an f-string because the HTML is full
    # of literal `{`/`}` from CSS and JS.
    voice_html = voice_html.replace("__RECOG_LANG__", lang)

    # Initialize session state for the transcript if not present yet.
    if key not in st.session_state:
        st.session_state[key] = ""

    # Render the (static) component.
    components.html(voice_html, height=height)

    # Return the last stored transcript (see NOTE in the docstring).
    return st.session_state.get(key, "")