Athspi committed on
Commit 6dde081 · verified · 1 Parent(s): 26cfa8b

Create app.py

Files changed (1): app.py (+232, -0)
app.py ADDED
@@ -0,0 +1,232 @@

import os
import uuid
import time
from pathlib import Path
import io
import logging

import torch
from transformers import pipeline
import soundfile as sf
import numpy as np
from fastapi import FastAPI, HTTPException, Body, BackgroundTasks
from fastapi.responses import StreamingResponse, FileResponse  # To send binary audio data (FileResponse is used below)
from pydantic import BaseModel

# --- Configuration ---
# Choose a TTS model from the Hugging Face Hub
MODEL_NAME = "espnet/kan-bayashi_ljspeech_vits"  # Example model
# MODEL_NAME = "suno/bark-small"

# Directories
BASE_DIR = Path(__file__).parent
TEMP_AUDIO_DIR = BASE_DIR / "temp_audio"  # For temporary storage before sending

# Ensure temporary audio directory exists
TEMP_AUDIO_DIR.mkdir(parents=True, exist_ok=True)

# Configure Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Pydantic Model for Request Body ---
class TTSRequest(BaseModel):
    text: str

# --- Load TTS Model (Load on startup) ---
logger.info("Attempting to load TTS model...")
start_load_time = time.time()
tts_pipeline = None
try:
    # Use GPU if available
    if torch.cuda.is_available():
        device = "cuda"
    # Check for MPS (Apple Silicon) support if not CUDA
    elif torch.backends.mps.is_available():
        device = "mps"
    else:
        device = "cpu"

    logger.info(f"Using device: {device}")
    tts_pipeline = pipeline("text-to-speech", model=MODEL_NAME, device=device)
    logger.info(f"Model '{MODEL_NAME}' loaded successfully in {time.time() - start_load_time:.2f} seconds.")
except Exception as e:
    logger.error(f"FATAL: Could not load TTS model '{MODEL_NAME}'. Error: {e}", exc_info=True)
    # The application can still run, but the /api/tts endpoint will fail until the model is loaded/fixed.

# --- Initialize FastAPI App ---
app = FastAPI(
    title="Text-to-Speech API Service",
    description=f"Provides a text-to-speech endpoint using the {MODEL_NAME} model. Send text, receive WAV audio.",
    version="1.0.0"
)

# --- Background Task for Cleanup ---
def cleanup_temp_file(filepath: Path):
    """Removes a file in the background."""
    try:
        if filepath.exists():
            os.remove(filepath)
            logger.info(f"Cleaned up temp file: {filepath.name}")
    except OSError as e:
        logger.error(f"Error deleting temp file {filepath.name}: {e}")

# --- API Endpoint for Text-to-Speech ---
@app.post(
    "/api/tts",
    tags=["TTS"],
    summary="Generate Speech from Text",
    description="""Send a JSON object with a "text" field.
Returns the generated speech as a WAV audio file stream.""",
    responses={
        200: {
            "content": {"audio/wav": {}},
            "description": "Successful response returning the WAV audio stream.",
        },
        400: {"description": "Bad Request (e.g., empty text)"},
        500: {"description": "Internal Server Error (e.g., model error)"},
        503: {"description": "Service Unavailable (e.g., model not loaded)"},
    },
)
async def generate_speech_api(
    background_tasks: BackgroundTasks,
    tts_request: TTSRequest = Body(...)
):
    """
    Receives text via POST request and returns the generated WAV audio directly.
    """
    if tts_pipeline is None:
        raise HTTPException(status_code=503, detail="TTS Model is not available or failed to load.")

    text = tts_request.text
    if not text or not text.strip():
        raise HTTPException(status_code=400, detail="Input text cannot be empty.")

    logger.info(f"Received API request to synthesize: '{text[:50]}...'")  # Log truncated text
    start_synth_time = time.time()

    try:
        # --- Generate Audio ---
        with torch.no_grad():  # Good practice for inference
            output = tts_pipeline(text)

        audio_data = output.get("audio")
        sampling_rate = output.get("sampling_rate")

        if audio_data is None or sampling_rate is None:
            logger.error("TTS pipeline output missing 'audio' or 'sampling_rate'.")
            raise ValueError("Invalid output from TTS pipeline.")

        # Ensure NumPy array
        if isinstance(audio_data, torch.Tensor):
            # Ensure it's on CPU before converting to numpy
            audio_data = audio_data.cpu().numpy()
        if not isinstance(audio_data, np.ndarray):
            logger.error(f"Unexpected audio data type: {type(audio_data)}")
            raise TypeError(f"Expected audio data as NumPy array, got {type(audio_data)}")

        # Normalize if float and outside [-1, 1] range (important for WAV)
        if np.issubdtype(audio_data.dtype, np.floating):
            max_val = np.max(np.abs(audio_data))
            if max_val > 1.0:
                audio_data = audio_data / max_val
            # Convert to 16-bit integer format for standard WAV
            audio_data = (audio_data * 32767).astype(np.int16)
        elif not np.issubdtype(audio_data.dtype, np.integer):
            logger.warning(f"Audio data is not float or int: {audio_data.dtype}. Attempting conversion to int16.")
            # Attempt conversion if possible, might need adjustment based on model output
            audio_data = audio_data.astype(np.int16)

        synthesis_time = time.time() - start_synth_time
        logger.info(f"Audio generated in {synthesis_time:.2f} seconds.")

        # --- Prepare Audio for Streaming ---
        # Method 1: Save to temp file and stream it (often safer for large files)
        filename = f"speech_{uuid.uuid4()}.wav"
        filepath = TEMP_AUDIO_DIR / filename
        sf.write(filepath, audio_data, sampling_rate, subtype='PCM_16')  # Save as standard 16-bit WAV
        logger.info(f"Temporary audio saved to: {filepath.name}")

        # Schedule the cleanup task to run after the response is sent
        background_tasks.add_task(cleanup_temp_file, filepath)

        # Return the file directly as a streaming response
        return FileResponse(
            path=filepath,
            media_type="audio/wav",
            filename=filename  # Suggests a filename to the client
        )

        # # Method 2: Stream directly from memory buffer (avoids disk I/O)
        # buffer = io.BytesIO()
        # sf.write(buffer, audio_data, sampling_rate, format='WAV', subtype='PCM_16')
        # buffer.seek(0)  # Reset buffer position to the beginning
        # logger.info("Audio prepared in memory buffer.")
        # return StreamingResponse(buffer, media_type="audio/wav")

    except Exception as e:
        logger.error(f"Error during speech generation or streaming: {e}", exc_info=True)
        # Cleanup temp file if it was created before an error occurred during streaming prep
        if 'filepath' in locals() and filepath.exists():
            logger.info(f"Cleaning up temp file due to error: {filepath.name}")
            os.remove(filepath)
        raise HTTPException(status_code=500, detail=f"Failed to process speech request. Error: {str(e)}")


# --- Health Check Endpoint (Good Practice) ---
@app.get("/health", tags=["System"], summary="Check API Health")
async def health_check():
    """
    Simple health check endpoint. Checks if the TTS model is loaded.
    """
    if tts_pipeline is None:
        return {"status": "unhealthy", "reason": "TTS model is not loaded or failed to load."}
    # Can add more checks here (e.g., disk space, dependencies)
    return {"status": "ok", "model_loaded": MODEL_NAME}

# --- Root Endpoint (Optional Information) ---
@app.get("/", tags=["System"], summary="API Information")
async def read_root():
    """
    Provides basic information about the API.
    """
    return {
        "message": "Welcome to the Text-to-Speech API Service!",
        "model_used": MODEL_NAME,
        "tts_endpoint": "/api/tts",
        "health_endpoint": "/health",
        "documentation": "/docs"  # Link to FastAPI auto-generated docs
    }

# --- Optional: Add cleanup for *old* files on startup (if using FileResponse) ---
def cleanup_old_audio_files(max_age_seconds: int = 3600):  # Clean files older than 1 hour
    now = time.time()
    count = 0
    try:
        for filename in os.listdir(TEMP_AUDIO_DIR):
            filepath = TEMP_AUDIO_DIR / filename
            if filepath.is_file() and filename.startswith("speech_") and filename.endswith(".wav"):
                try:
                    file_mod_time = os.path.getmtime(filepath)
                    if (now - file_mod_time) > max_age_seconds:
                        os.remove(filepath)
                        logger.info(f"Startup cleanup: Removed old temp file {filename}")
                        count += 1
                except OSError as e:
                    logger.warning(f"Startup cleanup: Error removing file {filename}: {e}")
        if count > 0:
            logger.info(f"Startup cleanup: Removed {count} old audio files.")
    except Exception as e:
        logger.error(f"Startup cleanup: Error during old file cleanup: {e}")

# Run cleanup on startup
cleanup_old_audio_files()

# --- How to Run Locally (for testing) ---
# if __name__ == "__main__":
#     import uvicorn
#     # Ensure temp_audio exists before starting
#     TEMP_AUDIO_DIR.mkdir(parents=True, exist_ok=True)
#     cleanup_old_audio_files()  # Run cleanup before starting server
#     uvicorn.run("app:app", host="127.0.0.1", port=8000, reload=True)  # Use reload=False for production testing
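For reference, a minimal client sketch for the /api/tts endpoint is shown below. It assumes the server is running locally on port 8000 (as in the commented uvicorn snippet above) and uses the requests library; the output filename speech.wav is arbitrary and not part of the committed code.

# Hypothetical client example for /api/tts (not part of this commit).
# Assumes the API is reachable at http://127.0.0.1:8000.
import requests

response = requests.post(
    "http://127.0.0.1:8000/api/tts",
    json={"text": "Hello from the TTS service."},
    timeout=120,  # synthesis can be slow on CPU
)
response.raise_for_status()

# The endpoint returns a 16-bit PCM WAV file in the response body.
with open("speech.wav", "wb") as f:
    f.write(response.content)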