vikramjeetthakur committed on
Commit cadfb98 · verified · 1 Parent(s): 61677ed

Create app.py

Files changed (1)
  1. app.py +102 -0
app.py ADDED
@@ -0,0 +1,102 @@
+ import streamlit as st
+ import pandas as pd
+ from utils.data_loader import load_data
+ from utils.model_loader import evaluate_model, get_test_predictions
+ from visualizations.plot_functions import plot_metrics
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ from sklearn.metrics import confusion_matrix, roc_curve, auc
+
+ def load_css():
+     with open("static/style.css") as f:
+         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
+
+ def load_js():
+     # Note: Streamlit does not execute <script> tags injected via st.markdown;
+     # st.components.v1.html would be needed to actually run JavaScript.
+     with open("static/script.js") as f:
+         st.markdown(f"<script>{f.read()}</script>", unsafe_allow_html=True)
+
+ def main():
+     st.set_page_config(page_title="ML Dashboard", layout="wide")
+     load_css()
+     load_js()
+
+     # Sidebar for filters and model selection
+     st.sidebar.title("Options")
+     selected_model = st.sidebar.selectbox(
+         "Select Model",
+         ["Logistic Regression", "Decision Tree", "Random Forest", "Gradient Boosting", "SVM"],
+     )
+
+     # Main sections
+     st.title("ML Dashboard")
+
+     # Dataset preview
+     st.header("Dataset")
+     df = load_data()
+     st.dataframe(df.head())
+
+     # Evaluation metrics for the selected model
+     st.header("Model Evaluation")
+     metrics = evaluate_model(selected_model)
+     st.write(metrics)  # show whatever evaluate_model returns for the chosen model
+
+     # Tabs for better organization
+     tab1, tab2, tab3 = st.tabs(["Metrics Table", "Bar Plot", "Confusion Matrix"])
+
+     # Load results exported from the notebook
+     @st.cache_data
+     def load_results():
+         # Simulated results standing in for the notebook export
+         results = {
+             'Model': ['Logistic Regression', 'Decision Tree', 'Random Forest', 'Gradient Boosting', 'SVM'],
+             'Accuracy': [0.85, 0.83, 0.87, 0.88, 0.84],
+             'Precision': [0.82, 0.80, 0.86, 0.87, 0.81],
+             'Recall': [0.78, 0.76, 0.84, 0.85, 0.79],
+             'F1 Score': [0.80, 0.78, 0.85, 0.86, 0.80]
+         }
+         return pd.DataFrame(results)
+
+     # Display results in the app
+     st.header("Model Results from Notebook")
+     results_df = load_results()
+     st.dataframe(results_df)
+
+     # Metrics Table, matching the style of the notebook results
+     with tab1:
+         st.write("### Metrics Table")
+         st.dataframe(results_df)  # same dataframe display style as the notebook results
+
+     # Bar plot built from the notebook results
+     with tab2:
+         st.write("### F1 Score Comparison")
+         fig, ax = plt.subplots()
+         sns.barplot(data=results_df, x='Model', y='F1 Score', ax=ax, palette="viridis")
+         ax.set_title("F1 Score by Model")
+         ax.set_ylabel("F1 Score")
+         ax.set_xlabel("Model")
+         st.pyplot(fig)
+
+     # Confusion matrix (simulated data for now)
+     with tab3:
+         st.write("### Confusion Matrix")
+         cm = [[50, 10], [5, 35]]  # Example data
+         fig, ax = plt.subplots()
+         sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", ax=ax)
+         ax.set_title("Confusion Matrix")
+         ax.set_xlabel("Predicted")
+         ax.set_ylabel("Actual")
+         st.pyplot(fig)
+
+     # Optional: ROC curve for the selected model
+     if st.sidebar.checkbox("Show ROC Curve"):
+         st.write("### ROC Curve")
+         # roc_curve needs true labels and scores; get_test_predictions is
+         # expected to return them for the selected model
+         y_test, y_score = get_test_predictions(selected_model)
+         fpr, tpr, _ = roc_curve(y_test, y_score)
+         roc_auc = auc(fpr, tpr)
+         fig, ax = plt.subplots()
+         ax.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+         ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+         ax.set_title("Receiver Operating Characteristic")
+         ax.set_xlabel("False Positive Rate")
+         ax.set_ylabel("True Positive Rate")
+         ax.legend(loc="lower right")
+         st.pyplot(fig)
+
+ if __name__ == "__main__":
+     main()
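
app.py imports load_data, evaluate_model, and get_test_predictions from project modules that are not part of this commit, plus plot_metrics from visualizations/plot_functions.py (imported but never called). The sketch below is a hypothetical stand-in for those helpers, assuming load_data returns a DataFrame, evaluate_model returns a dict of metrics, and get_test_predictions returns (y_test, y_score) suitable for sklearn's roc_curve; none of these signatures are confirmed by the commit.

# Hypothetical stand-ins for utils/data_loader.py and utils/model_loader.py.
# Sketched only so app.py can be smoke-tested end to end; not the actual
# project code, and all return shapes here are assumptions.
import numpy as np
import pandas as pd

def load_data():
    # Assumed to return the project's dataset as a DataFrame
    rng = np.random.default_rng(0)
    return pd.DataFrame({
        "feature_1": rng.normal(size=100),
        "feature_2": rng.normal(size=100),
        "label": rng.integers(0, 2, size=100),
    })

def evaluate_model(model_name):
    # Assumed to return a dict of metrics for the chosen model
    return {"model": model_name, "accuracy": 0.85}

def get_test_predictions(model_name):
    # Assumed to return (y_test, y_score) for sklearn's roc_curve
    rng = np.random.default_rng(1)
    y_test = rng.integers(0, 2, size=200)
    y_score = np.clip(0.6 * y_test + rng.normal(0.2, 0.25, size=200), 0.0, 1.0)
    return y_test, y_score

# visualizations/plot_functions.py would similarly need at least a
# plot_metrics symbol, since app.py imports it even though it never calls it.

With stubs like these (plus static/style.css and static/script.js) in place, the dashboard can be started locally with `streamlit run app.py`.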