Spaces:
Runtime error
import gradio as gr
from transformers import pipeline
# Load models

# Sentiment Analysis
classifier_sentiment = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

def analyze_sentiment(text):
    result = classifier_sentiment(text)[0]
    label = result['label']
    score = result['score']
    return f"Label: {label}, Score: {score:.2f}"
# Translation (English to French)
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")

def translate_text(text):
    result = translator(text)[0]
    translated_text = result["translation_text"]
    return translated_text
# Image Classification
classifier_image = pipeline("image-classification", model="google/mobilenet_v2_1.0_224")

def classify_image(image):
    results = classifier_image(image)
    output = ""
    for result in results:
        output += f"{result['label']}: {result['score']:.2f}\n"
    return output
# Speech to Text
speech_to_text = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

def transcribe_audio(audio):
    text = speech_to_text(audio)["text"]
    return text
# Text Summarization
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

def summarize_text(text):
    summary = summarizer(text, max_length=130, min_length=30, do_sample=False)[0]["summary_text"]
    return summary
# Define custom CSS styles
# Note: gr.Blocks(css=...) expects raw CSS, so the string must not be wrapped in <style> tags.
css = """
body {
    background-color: #e9ecef;
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.gradio-container {
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.15);
    padding: 20px;
    background-color: #74748a;
    max-width: 800px;
    margin: auto;
}
h1 {
    color: black;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown("<h1 style='text-align: center;'>Multi-functional AI Demo</h1>")

    with gr.Tab("Sentiment Analysis😣"):
        text_input = gr.Textbox(placeholder="Enter text here...")
        text_output = gr.Textbox()
        sentiment_button = gr.Button("Analyze")
        sentiment_button.click(analyze_sentiment, inputs=text_input, outputs=text_output)

    with gr.Tab("Translation📚"):
        text_input_trans = gr.Textbox(placeholder="Enter English text here...")
        text_output_trans = gr.Textbox()
        trans_button = gr.Button("Translate")
        trans_button.click(translate_text, inputs=text_input_trans, outputs=text_output_trans)

    with gr.Tab("Image Classification🔮"):
        image_input = gr.Image(type="pil")
        image_output = gr.Textbox()
        image_button = gr.Button("Classify")
        image_button.click(classify_image, inputs=image_input, outputs=image_output)

    with gr.Tab("Speech to Text🔊"):
        audio_input = gr.Audio(sources=["microphone"], type="filepath")
        audio_output = gr.Textbox()
        audio_button = gr.Button("Transcribe")
        audio_button.click(transcribe_audio, inputs=audio_input, outputs=audio_output)

    with gr.Tab("Text Summarization📑"):
        text_input_summ = gr.Textbox(placeholder="Enter text here...")
        text_output_summ = gr.Textbox()
        summ_button = gr.Button("Summarize")
        summ_button.click(summarize_text, inputs=text_input_summ, outputs=text_output_summ)

demo.launch()
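The Space's status badge reports a runtime error; on Spaces that is often a missing dependency rather than a bug in app.py. A minimal dependency sketch that would cover these pipelines (an assumption on my part, not a file from the original repository):

# requirements.txt (assumed, not part of the original Space)
gradio
transformers
torch
sentencepiece  # tokenizer dependency of the Helsinki-NLP MarianMT translation model

The Speech to Text tab may additionally need ffmpeg installed on the host so the transformers ASR pipeline can decode the recorded audio file.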