Rawiwan1912 committed
Commit bc69c94 · verified · 1 parent: 2a72ef1

Delete modules
modules/churn_analysis.py DELETED
@@ -1,76 +0,0 @@
- import gradio as gr
- import pandas as pd
- import numpy as np
- import joblib, os
-
- script_dir = os.path.dirname(os.path.abspath(__file__))
- pipeline_path = os.path.join(script_dir, 'toolkit', 'pipeline.joblib')
- model_path = os.path.join(script_dir, 'toolkit', 'Random Forest Classifier.joblib')
-
- # Load transformation pipeline and model
- pipeline = joblib.load(pipeline_path)
- model = joblib.load(model_path)
-
- # Create a function to calculate TotalCharges
- def calculate_total_charges(tenure, monthly_charges):
-     return tenure * monthly_charges
-
- # Create a function that applies the ML pipeline and makes predictions
- def predict(SeniorCitizen, Partner, Dependents, tenure,
-             InternetService, OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport,
-             StreamingTV, StreamingMovies, Contract, PaperlessBilling, PaymentMethod,
-             MonthlyCharges):
-
-     # Calculate TotalCharges
-     TotalCharges = calculate_total_charges(tenure, MonthlyCharges)
-
-     # Create a dataframe with the input data
-     input_df = pd.DataFrame({
-         'SeniorCitizen': [SeniorCitizen],
-         'Partner': [Partner],
-         'Dependents': [Dependents],
-         'tenure': [tenure],
-         'InternetService': [InternetService],
-         'OnlineSecurity': [OnlineSecurity],
-         'OnlineBackup': [OnlineBackup],
-         'DeviceProtection': [DeviceProtection],
-         'TechSupport': [TechSupport],
-         'StreamingTV': [StreamingTV],
-         'StreamingMovies': [StreamingMovies],
-         'Contract': [Contract],
-         'PaperlessBilling': [PaperlessBilling],
-         'PaymentMethod': [PaymentMethod],
-         'MonthlyCharges': [MonthlyCharges],
-         'TotalCharges': [TotalCharges]
-     })
-
-     # Select categorical and numerical columns separately
-     cat_cols = [col for col in input_df.columns if input_df[col].dtype == 'object']
-     num_cols = [col for col in input_df.columns if input_df[col].dtype != 'object']
-
-     X_processed = pipeline.transform(input_df)
-
-     # Extract feature names for categorical columns after one-hot encoding
-     cat_encoder = pipeline.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot']
-     cat_feature_names = cat_encoder.get_feature_names_out(cat_cols)
-
-     # Concatenate numerical and categorical feature names
-     feature_names = num_cols + list(cat_feature_names)
-
-     # Convert X_processed to a DataFrame
-     final_df = pd.DataFrame(X_processed, columns=feature_names)
-
-     # Extract the first three columns and remaining columns, then merge
-     first_three_columns = final_df.iloc[:, :3]
-     remaining_columns = final_df.iloc[:, 3:]
-     final_df = pd.concat([remaining_columns, first_three_columns], axis=1)
-
-     # Make predictions using the model
-     prediction_probs = model.predict_proba(final_df)[0]
-     prediction_label = {
-         "Prediction: CHURN 🔴": prediction_probs[1],
-         "Prediction: STAY ✅": prediction_probs[0]
-     }
-
-     return prediction_label
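Note for anyone restoring churn_analysis.py: the tail of predict() rotates the first three processed columns behind the rest, presumably so the feature order matches what the Random Forest saw at training time. A minimal, self-contained sketch of that rotation (toy column names, not the real processed features):

```python
# Toy illustration of the column rotation at the end of the deleted predict();
# the column names here are invented for the example.
import pandas as pd

df = pd.DataFrame([[1, 2, 3, 4, 5]], columns=["a", "b", "c", "d", "e"])
rotated = pd.concat([df.iloc[:, 3:], df.iloc[:, :3]], axis=1)
print(rotated.columns.tolist())  # ['d', 'e', 'a', 'b', 'c']
```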
modules/financial_analyst.py DELETED
@@ -1,105 +0,0 @@
- # import os
- # os.system("pip install gradio==4.44.1")
- # from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification
- # import gradio as gr
- # import spacy
- # try:
- #     nlp = spacy.load("en_core_web_sm")
- # except OSError:
- #     from spacy.cli import download
- #     download("en_core_web_sm")
- #     nlp = spacy.load("en_core_web_sm")
- # nlp = spacy.load('en_core_web_sm')
- # nlp.add_pipe('sentencizer')
-
- # def split_in_sentences(text):
- #     doc = nlp(text)
- #     return [str(sent).strip() for sent in doc.sents]
-
- # def make_spans(text, results):
- #     results_list = []
- #     for i in range(len(results)):
- #         results_list.append(results[i]['label'])
- #     facts_spans = list(zip(split_in_sentences(text), results_list))
- #     return facts_spans
-
- # auth_token = os.environ.get("HF_Token")
-
- # ## Speech Recognition
- # asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
- # def transcribe(audio):
- #     text = asr(audio)["text"]
- #     return text
- # def speech_to_text(speech):
- #     text = asr(speech)["text"]
- #     return text
-
- # ## Summarization
- # summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
- # def summarize_text(text):
- #     resp = summarizer(text)
- #     stext = resp[0]['summary_text']
- #     return stext
-
- # ## Fiscal Tone Analysis
- # fin_model = pipeline("sentiment-analysis", model='yiyanghkust/finbert-tone', tokenizer='yiyanghkust/finbert-tone')
- # def text_to_sentiment(text):
- #     sentiment = fin_model(text)[0]["label"]
- #     return sentiment
-
- # ## Company Extraction
- # def fin_ner(text):
- #     api = gr.Interface.load("dslim/bert-base-NER", src='models', use_auth_token=auth_token)
- #     replaced_spans = api(text)
- #     return replaced_spans
-
- # ## Fiscal Sentiment by Sentence
- # def fin_ext(text):
- #     results = fin_model(split_in_sentences(text))
- #     return make_spans(text, results)
-
- # ## Forward Looking Statement
- # def fls(text):
- #     # fls_model = pipeline("text-classification", model="yiyanghkust/finbert-fls", tokenizer="yiyanghkust/finbert-fls")
- #     fls_model = pipeline("text-classification", model="demo-org/finbert_fls", tokenizer="demo-org/finbert_fls", use_auth_token=auth_token)
- #     results = fls_model(split_in_sentences(text))
- #     return make_spans(text, results)
-
- # with gr.Blocks() as demo:
- #     gr.Markdown("## Financial Analyst AI")
- #     gr.Markdown("This project applies AI trained by our financial analysts to analyze earning calls and other financial documents.")
- #     with gr.Row():
- #         with gr.Column():
- #             audio_file = gr.Audio(type="filepath")
-
- #             with gr.Row():
- #                 b1 = gr.Button("Recognize Speech")
- #             with gr.Row():
- #                 text = gr.Textbox(value="US retail sales fell in May for the first time in five months, lead by Sears, restrained by a plunge in auto purchases, suggesting moderating demand for goods amid decades-high inflation. The value of overall retail purchases decreased 0.3%, after a downwardly revised 0.7% gain in April, Commerce Department figures showed Wednesday. Excluding Tesla vehicles, sales rose 0.5% last month. The department expects inflation to continue to rise.")
- #                 b1.click(speech_to_text, inputs=audio_file, outputs=text)
- #             with gr.Row():
- #                 b2 = gr.Button("Summarize Text")
- #                 stext = gr.Textbox()
- #                 b2.click(summarize_text, inputs=text, outputs=stext)
- #             with gr.Row():
- #                 b3 = gr.Button("Classify Financial Tone")
- #                 label = gr.Label()
- #                 b3.click(text_to_sentiment, inputs=stext, outputs=label)
- #         with gr.Column():
- #             b5 = gr.Button("Financial Tone and Forward Looking Statement Analysis")
- #             with gr.Row():
- #                 fin_spans = gr.HighlightedText()
- #                 b5.click(fin_ext, inputs=text, outputs=fin_spans)
- #             with gr.Row():
- #                 fls_spans = gr.HighlightedText()
- #                 b5.click(fls, inputs=text, outputs=fls_spans)
- #             with gr.Row():
- #                 b4 = gr.Button("Identify Companies & Locations")
- #                 replaced_spans = gr.HighlightedText()
- #                 b4.click(fin_ner, inputs=text, outputs=replaced_spans)
-
- # if __name__ == "__main__":
- #     demo.launch()
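For context, make_spans() in the commented-out module paired each sentence with its predicted label, which is the list-of-tuples format gr.HighlightedText accepts. A standalone sketch with invented sentences and labels (the real labels came from the FinBERT tone and FLS classifiers):

```python
# Invented (sentence, label) pairs in the shape make_spans() returned.
sentences = ["Revenue rose 10% year over year.", "We expect margin pressure next quarter."]
labels = ["Positive", "Negative"]
facts_spans = list(zip(sentences, labels))
print(facts_spans)
# [('Revenue rose 10% year over year.', 'Positive'),
#  ('We expect margin pressure next quarter.', 'Negative')]
```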
modules/personal_info_identifier.py DELETED
@@ -1,6 +0,0 @@
- # import gradio as gr
-
- # demo = gr.load("models/iiiorg/piiranha-v1-detect-personal-information")
-
- # if __name__ == "__main__":
- #     demo.launch()
modules/sentiment.py DELETED
@@ -1,39 +0,0 @@
- # from transformers import pipeline
- # import gradio as gr
-
- # classifier = pipeline("sentiment-analysis", model="cardiffnlp/twitter-xlm-roberta-base-sentiment", framework="pt")
-
- # def sentiment_analysis(message, history):
- #     """
- #     Analyze the sentiment of a message.
- #     Returns the sentiment label with its probability.
- #     """
- #     result = classifier(message)
- #     return f"Sentiment: {result[0]['label']} (Probability: {result[0]['score']:.2f})"
-
- # with gr.Blocks() as demo:
- #     gr.Markdown("""
- #     # Sentiment Analysis
- #     This application uses a Machine Learning model to analyze the sentiment of submitted messages.
- #     It can detect whether a text is positive, negative, or neutral, along with the corresponding probability.
- #     """)
-
- #     chat = gr.ChatInterface(sentiment_analysis, type="messages")
-
- #     gr.Markdown("""
- #     ---
- #     ### Connect with me:
- #     [Instagram 📸](https://www.instagram.com/srjosueaaron/)
-
- #     [TikTok 🎵](https://www.tiktok.com/@srjosueaaron)
-
- #     [YouTube 🎬](https://www.youtube.com/@srjosueaaron)
-
- #     ---
- #     Sentiment analysis demo using the [CardiffNLP](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment) model.
-
- #     Built with ❤️ by [@srjosueaaron](https://www.instagram.com/srjosueaaron/).
- #     """)
-
- # if __name__ == "__main__":
- #     demo.launch()
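The deleted chat handler formatted the raw pipeline output into a reply string; the underlying call returns a list with one dict per input. A standalone sketch (model name taken from the diff; the example input and printed score are illustrative):

```python
# Minimal reproduction of the classifier call the deleted module wrapped.
from transformers import pipeline

classifier = pipeline(
    "sentiment-analysis",
    model="cardiffnlp/twitter-xlm-roberta-base-sentiment",
    framework="pt",
)
print(classifier("I love this product!"))
# e.g. [{'label': 'positive', 'score': 0.97}]
```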
modules/translator.py DELETED
@@ -1,35 +0,0 @@
- import gradio as gr
- from pydantic import BaseModel, Field
- from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate
- from langchain.output_parsers import PydanticOutputParser
- from langchain_openai import ChatOpenAI
-
- chat = ChatOpenAI()
-
- # Define the Pydantic model (updated for Pydantic v2)
- class TextTranslator(BaseModel):
-     output: str = Field(description="Python string containing the output text translated in the desired language")
-
- # Use PydanticOutputParser (no need for response_schemas)
- output_parser = PydanticOutputParser(pydantic_object=TextTranslator)
-
- def text_translator(input_text: str, language: str) -> str:
-     human_template = """Enter the text that you want to translate:
- {input_text}, and enter the language that you want it to translate to {language}."""
-     human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
-     chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt])
-     prompt = chat_prompt.format_prompt(input_text=input_text, language=language)
-     messages = prompt.to_messages()
-     response = chat(messages=messages)
-
-     # Parse the model response into the TextTranslator schema
-     output = output_parser.parse(response.content)
-     return output.output
-
- def text_translator_ui():
-     gr.Markdown("### Text Translator\nTranslate text into any language using AI.")
-     input_text = gr.Textbox(label="Enter the text that you want to translate")
-     input_lang = gr.Textbox(label="Enter the language that you want it to translate to", placeholder="Example: Hindi, French, Bengali, etc.")
-     output_text = gr.Textbox(label="Translated text")
-     translate_button = gr.Button("Translate")
-     translate_button.click(fn=text_translator, inputs=[input_text, input_lang], outputs=output_text)
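text_translator_ui() only creates components; Gradio renders them when the function runs inside a Blocks context. A hypothetical entry point matching the pre-deletion layout (the import path is an assumption, not part of the commit):

```python
# Hypothetical app wiring for the deleted module; the import path assumes
# the repository layout before this commit removed modules/translator.py.
import gradio as gr

from modules.translator import text_translator_ui

with gr.Blocks() as demo:
    text_translator_ui()

if __name__ == "__main__":
    demo.launch()
```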