# app.py — IS361 Group 4 "JUBJAI" Hugging Face Space: all-in-one AI demo apps
# (translator, sentiment, financial analyst, churn prediction, PII detection).
import gradio as gr
from transformers import pipeline
import os
import pandas as pd
import numpy as np
import joblib
import spacy
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.output_parsers import PydanticOutputParser
from langchain_openai import ChatOpenAI
# Set up models for each app.
# NOTE: every model is loaded eagerly at import time, so startup is slow and
# memory-heavy; all handlers below close over these module-level globals.
chat = ChatOpenAI()  # LLM for the translator tab (presumably reads OPENAI_API_KEY from env — verify deployment secrets)
classifier = pipeline("sentiment-analysis", model="cardiffnlp/twitter-xlm-roberta-base-sentiment")  # multilingual sentiment
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")  # speech-to-text; not wired into any tab below
summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")  # meeting/dialogue summarizer
fin_model = pipeline("sentiment-analysis", model='yiyanghkust/finbert-tone', tokenizer='yiyanghkust/finbert-tone')  # financial tone
fls_model = pipeline("text-classification", model="demo-org/finbert_fls", tokenizer="demo-org/finbert_fls")  # forward-looking statements
# --- Translator App ---
class TextTranslator(BaseModel):
    """Pydantic schema for the structured JSON reply expected from the LLM."""
    # Single field holding the translated text extracted from the model output.
    output: str = Field(description="Python string containing the output text translated in the desired language")
# Parser that converts the raw LLM reply into a TextTranslator instance.
output_parser = PydanticOutputParser(pydantic_object=TextTranslator)
# Schema instructions appended to the prompt so the LLM answers in parseable JSON.
format_instructions = output_parser.get_format_instructions()
def text_translator(input_text : str, language : str) -> str:
    """Translate *input_text* into *language* using the module-level ChatOpenAI model.

    Builds a chat prompt that embeds the Pydantic format instructions, invokes
    the model, and parses the structured reply.

    Args:
        input_text: Text to translate.
        language: Target language name (free text, e.g. "French").

    Returns:
        The translated text extracted from the parsed LLM output.

    Raises:
        OutputParserException: if the LLM reply does not match the schema.
    """
    human_template = """Enter the text that you want to translate:
{input_text}, and enter the language that you want it to translate to {language}. {format_instructions}"""
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt])
    prompt = chat_prompt.format_prompt(input_text = input_text, language = language, format_instructions = format_instructions)
    messages = prompt.to_messages()
    # BUG FIX: `chat(messages=messages)` uses the deprecated BaseChatModel
    # __call__ API; `invoke` is the supported entry point and returns the
    # same AIMessage.
    response = chat.invoke(messages)
    output = output_parser.parse(response.content)
    output_text = output.output
    return output_text
# --- Sentiment Analysis App ---
def sentiment_analysis(message, history=None):
    """Classify *message* with the multilingual sentiment classifier.

    Args:
        message: Text to classify.
        history: Chat history, accepted for chat-style callers but unused.
            BUG FIX: it now defaults to None — the Blocks UI wires this
            handler with a single input component, so without a default
            every click raised TypeError (missing `history` argument).

    Returns:
        A Spanish-language summary string with the label and its score.
    """
    result = classifier(message)
    return f"Sentimiento : {result[0]['label']} (Probabilidad: {result[0]['score']:.2f})"
# --- Financial Analyst App ---
nlp = spacy.load('en_core_web_sm')  # small English spaCy model, used only for sentence splitting
nlp.add_pipe('sentencizer')  # add rule-based sentence boundaries on top of the loaded pipeline
def split_in_sentences(text):
    """Segment *text* into a list of stripped sentence strings via the spaCy pipeline."""
    sentences = []
    for sentence in nlp(text).sents:
        sentences.append(str(sentence).strip())
    return sentences
def make_spans(text, results):
    """Pair each sentence of *text* with the label of its classifier result.

    Args:
        text: The original full text.
        results: HF-pipeline output dicts, one per sentence, in the same order
            as split_in_sentences(text) produces them.

    Returns:
        A list of (sentence, label) tuples.
    """
    # Idiom fix: iterate the results directly instead of indexing via
    # range(len(results)).
    results_list = [result['label'] for result in results]
    return list(zip(split_in_sentences(text), results_list))
def summarize_text(text):
    """Return the summary produced by the meeting-summary pipeline for *text*."""
    return summarizer(text)[0]['summary_text']
def text_to_sentiment(text):
    """Return the FinBERT tone label for *text* as a whole."""
    return fin_model(text)[0]["label"]
def fin_ext(text):
    """Classify each sentence of *text* with FinBERT tone and return (sentence, label) spans."""
    per_sentence = fin_model(split_in_sentences(text))
    return make_spans(text, per_sentence)
def fls(text):
    """Tag each sentence of *text* with the forward-looking-statement model and return (sentence, label) spans."""
    per_sentence = fls_model(split_in_sentences(text))
    return make_spans(text, per_sentence)
# --- Customer Churn App ---
# Resolve artifact paths relative to this file so loading works regardless of CWD.
script_dir = os.path.dirname(os.path.abspath(__file__))
pipeline_path = os.path.join(script_dir, 'toolkit', 'pipeline.joblib')
model_path = os.path.join(script_dir, 'toolkit', 'Random Forest Classifier.joblib')
# NOTE(review): this rebinds the name `pipeline`, shadowing the
# `transformers.pipeline` factory imported at the top of the file. It works
# only because every transformers pipeline is created above this line;
# consider renaming (e.g. `churn_pipeline`) together with its use in predict().
# The loaded object is a fitted sklearn pipeline with a 'preprocessor' step
# containing a 'cat' one-hot encoder (see predict() below).
pipeline = joblib.load(pipeline_path)
model = joblib.load(model_path)  # trained Random Forest classifier
def calculate_total_charges(tenure, monthly_charges):
    """Derive the customer's total charges as months of tenure times the monthly charge."""
    total = monthly_charges * tenure
    return total
def predict(SeniorCitizen, Partner, Dependents, tenure, InternetService, OnlineSecurity, OnlineBackup, DeviceProtection,
            TechSupport, StreamingTV, StreamingMovies, Contract, PaperlessBilling, PaymentMethod, MonthlyCharges):
    """Predict churn probabilities for one customer from raw form inputs.

    Builds a single-row DataFrame matching the training schema, transforms it
    with the fitted preprocessing pipeline, reorders columns to what the model
    expects, and returns a label->probability dict for a gr.Label output.

    Returns:
        dict mapping the two prediction strings to their probabilities.
    """
    # TotalCharges is derived, not user-supplied: tenure * MonthlyCharges.
    TotalCharges = calculate_total_charges(tenure, MonthlyCharges)
    input_df = pd.DataFrame({
        'SeniorCitizen': [SeniorCitizen],
        'Partner': [Partner],
        'Dependents': [Dependents],
        'tenure': [tenure],
        'InternetService': [InternetService],
        'OnlineSecurity': [OnlineSecurity],
        'OnlineBackup': [OnlineBackup],
        'DeviceProtection': [DeviceProtection],
        'TechSupport': [TechSupport],
        'StreamingTV': [StreamingTV],
        'StreamingMovies': [StreamingMovies],
        'Contract': [Contract],
        'PaperlessBilling': [PaperlessBilling],
        'PaymentMethod': [PaymentMethod],
        'MonthlyCharges': [MonthlyCharges],
        'TotalCharges': [TotalCharges]
    })
    # Apply the fitted preprocessing (module-level `pipeline`, loaded from joblib).
    X_processed = pipeline.transform(input_df)
    # Recover readable feature names: numeric columns pass through, categorical
    # columns are expanded by the pipeline's one-hot encoder.
    cat_cols = [col for col in input_df.columns if input_df[col].dtype == 'object']
    num_cols = [col for col in input_df.columns if input_df[col].dtype != 'object']
    cat_encoder = pipeline.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot']
    cat_feature_names = cat_encoder.get_feature_names_out(cat_cols)
    feature_names = num_cols + list(cat_feature_names)
    final_df = pd.DataFrame(X_processed, columns=feature_names)
    # NOTE(review): moves the first 3 columns to the end — presumably to match
    # the column order the Random Forest was trained on; verify against the
    # training notebook before changing.
    final_df = pd.concat([final_df.iloc[:, 3:], final_df.iloc[:, :3]], axis=1)
    prediction_probs = model.predict_proba(final_df)[0]
    # Class index 1 is treated as "churn", index 0 as "stay" — TODO confirm
    # against model.classes_ from training.
    prediction_label = {
        "Prediction: CHURN 🔴": prediction_probs[1],
        "Prediction: STAY ✅": prediction_probs[0]
    }
    return prediction_label
# --- Personal Information Detection App ---
# BUG FIX: the original re-imported gradio here (already imported at the top)
# and called gr.load(...).launch() at module level, which starts a standalone
# server at import time — before the main Blocks app below is even defined.
# Load the hosted model demo once instead; the tab below renders its own copy.
pii_demo = gr.load("models/iiiorg/piiranha-v1-detect-personal-information")
# --- Gradio Interface ---
# Main UI: one tab per app. All click handlers call the module-level
# functions defined above.
with gr.Blocks() as demo:
    gr.Markdown("# All-in-One AI Apps")

    with gr.Tab("Text Translator"):
        gr.HTML("<h1 align='center'>Text Translator</h1>")
        text_input = gr.Textbox(label="Enter Text")
        language_input = gr.Textbox(label="Enter Language")
        translate_btn = gr.Button("Translate")
        translated_text = gr.Textbox(label="Translated Text")
        translate_btn.click(fn=text_translator, inputs=[text_input, language_input], outputs=translated_text)

    with gr.Tab("Sentiment Analysis"):
        gr.Markdown("# Sentiment Analysis")
        sentiment_input = gr.Textbox(label="Enter Message")
        sentiment_output = gr.Textbox(label="Sentiment")
        sentiment_btn = gr.Button("Analyze Sentiment")
        sentiment_btn.click(fn=sentiment_analysis, inputs=sentiment_input, outputs=sentiment_output)

    with gr.Tab("Financial Analyst"):
        gr.Markdown("# Financial Analyst AI")
        financial_input = gr.Textbox(label="Enter Text for Financial Analysis")
        summarize_btn = gr.Button("Summarize")
        # Renamed local from `sentiment_btn` so it no longer clobbers the
        # button variable of the Sentiment Analysis tab above.
        fin_tone_btn = gr.Button("Classify Financial Tone")
        financial_output = gr.Textbox(label="Analysis Results")
        summarize_btn.click(fn=summarize_text, inputs=financial_input, outputs=financial_output)
        fin_tone_btn.click(fn=text_to_sentiment, inputs=financial_input, outputs=financial_output)

    with gr.Tab("Customer Churn Prediction"):
        gr.Markdown("# Customer Churn Prediction")
        # Component order must match predict()'s parameter order exactly.
        churn_inputs = [
            gr.Radio(['Yes', 'No'], label="Are you a Seniorcitizen?"),
            gr.Radio(['Yes', 'No'], label="Do you have a Partner?"),
            gr.Radio(['No', 'Yes'], label="Do you have Dependents?"),
            gr.Slider(label="Tenure (Months)", minimum=1, maximum=73),
            gr.Radio(['DSL', 'Fiber optic', 'No Internet'], label="Internet Service"),
            gr.Radio(['No', 'Yes'], label="Online Security"),
            gr.Radio(['No', 'Yes'], label="Online Backup"),
            gr.Radio(['No', 'Yes'], label="Device Protection"),
            gr.Radio(['No', 'Yes'], label="Tech Support"),
            gr.Radio(['No', 'Yes'], label="Streaming TV"),
            gr.Radio(['No', 'Yes'], label="Streaming Movies"),
            gr.Radio(['Month-to-month', 'One year', 'Two year'], label="Contract Type"),
            gr.Radio(['Yes', 'No'], label="Paperless Billing"),
            gr.Radio(['Electronic check', 'Mailed check', 'Bank transfer (automatic)', 'Credit card (automatic)'], label="Payment Method"),
            gr.Slider(label="Monthly Charges", minimum=18.4, maximum=118.65)
        ]
        churn_output = gr.Label(label="Churn Prediction")
        churn_btn = gr.Button("Predict Churn")
        churn_btn.click(fn=predict, inputs=churn_inputs, outputs=churn_output)

    with gr.Tab("Personal Information Detection"):
        gr.HTML("<h1 align='center'>Personal Information Detection</h1>")
        # BUG FIX: the original used gr.Interface.load(...).launch() here.
        # Calling .launch() inside a Blocks context starts a *second* server
        # instead of rendering in this tab; gr.load() called inside the
        # context renders the loaded demo in place.
        gr.load("models/iiiorg/piiranha-v1-detect-personal-information")

demo.launch(share=True)