import os

import gradio as gr
import requests

# Hugging Face Inference API endpoint for the Audio Spectrogram Transformer (AST)
# model fine-tuned on AudioSet.
API_URL = "https://api-inference.huggingface.co/models/MIT/ast-finetuned-audioset-10-10-0.4593"
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN')}"}


def classify_audio(audio_file):
    """Classify the uploaded audio file using the Hugging Face AST model."""
    if audio_file is None:
        return "Please upload an audio file."
    try:
        # gr.Audio(type="filepath") passes the uploaded file's path as a string,
        # so the path is opened directly rather than via a .name attribute.
        with open(audio_file, "rb") as f:
            data = f.read()
        response = requests.post(API_URL, headers=headers, data=data)
        if response.status_code == 200:
            results = response.json()
            return results
        else:
            return f"Error: API returned status code {response.status_code}"
    except Exception as e:
        return f"Error processing audio: {str(e)}"


# Create the Gradio interface
iface = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.JSON(label="Classification Results"),
    title="Audio Classification using AST Model",
    description="Upload an audio file to get its classification results using the Audio Spectrogram Transformer model.",
    examples=[],
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
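

# --- Optional usage sketch (commented out; not part of the app) ---
# A minimal, illustrative way to exercise the classifier outside the UI.
# Assumptions: HF_TOKEN is set in the environment, and "sample.wav" is a
# placeholder path to a local audio file.
#
#   print(classify_audio("sample.wav"))
#
# The running app can also be queried over HTTP with the gradio_client package
# (assuming a recent Gradio release and the default /predict endpoint of a
# single gr.Interface):
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict(handle_file("sample.wav"), api_name="/predict"))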