bhagwandas committed
Commit ff73cbe · verified · 1 Parent(s): b365915

Update app.py

Files changed (1)
  1. app.py +34 -18
app.py CHANGED
@@ -1,4 +1,4 @@
-# app.py - FactoryRAG+: Condition Monitoring with Dashboard, PDF Export, Anomaly Detection & Digital Twin
+# app.py - FactoryRAG+: Smart Chatbot with Role-Based Assistant, Dashboard, PDF, and Digital Twin
 
 import streamlit as st
 import pandas as pd
@@ -11,7 +11,6 @@ import base64
 from io import BytesIO
 from fpdf import FPDF
 
-# Streamlit config
 st.set_page_config(page_title="FactoryRAG+ - Smart Sensor Twin", layout="wide")
 st.title("🏭 FactoryRAG+: Smart Dashboard with AI Monitoring, PDF Reporting & Digital Twin")
 
@@ -62,23 +61,9 @@ if uploaded_file:
     iso = IsolationForest(contamination=0.02)
     anomaly_labels = iso.fit_predict(df[numeric_cols])
     df['anomaly'] = ['❌' if x == -1 else '' for x in anomaly_labels]
-    st.write("Anomaly Flags:")
     st.dataframe(df[df['anomaly'] == '❌'].head(5))
 
-    # --- Technical Question Answering ---
-    st.subheader("🧠 Ask Expert Questions")
-    query = st.text_input("Ask a question like 'Where is instability?' or 'Are anomalies visible?'")
-    if query:
-        query_vec = EMBED_MODEL.encode([query])[0]
-        sims = np.dot(st.session_state.embeddings, query_vec)
-        top_idxs = np.argsort(sims)[-3:][::-1]
-        context = "\n".join([st.session_state.chunks[i] for i in top_idxs])
-        prompt = f"Context:\n{context}\n\nAs a reliability engineer, answer: {query}"
-        response = GEN_MODEL(prompt, max_length=256)[0]['generated_text']
-        st.subheader("🤖 FactoryGPT Answer")
-        st.markdown(response)
-
-    # --- Digital Twin Metrics ---
+    # --- Digital Twin Summary ---
     st.subheader("🧪 Digital Twin Summary")
     twin_report = ""
     for col in selected_cols:
@@ -99,5 +84,36 @@ if uploaded_file:
     href = f'<a href="data:application/octet-stream;base64,{b64}" download="digital_twin_report.pdf">📄 Download PDF Report</a>'
     st.markdown(href, unsafe_allow_html=True)
 
+    # --- Role-based Factory Assistant ---
+    st.subheader("💬 Factory Assistant Chat")
+
+    roles = {
+        "Operator": "You are a machine operator. Provide practical insights and safety warnings.",
+        "Maintenance": "You are a maintenance technician. Suggest inspections and likely causes of sensor anomalies.",
+        "Engineer": "You are a control systems engineer. Offer analytical interpretations and system-level advice."
+    }
+
+    role = st.selectbox("👀 Choose your role: Operator, Maintenance, or Engineer", list(roles.keys()))
+
+    if 'chat_history' not in st.session_state:
+        st.session_state.chat_history = []
+
+    user_input = st.text_input("Ask FactoryGPT anything (based on uploaded sensor logs):", key="chat_input")
+
+    if user_input:
+        query_vec = EMBED_MODEL.encode([user_input])[0]
+        sims = np.dot(st.session_state.embeddings, query_vec)
+        top_idxs = np.argsort(sims)[-3:][::-1]
+        context = "\n".join([st.session_state.chunks[i] for i in top_idxs])
+        system_prompt = roles[role]
+        full_prompt = f"{system_prompt}\n\nSensor Context:\n{context}\n\nUser Question: {user_input}"
+        reply = GEN_MODEL(full_prompt, max_length=256)[0]['generated_text']
+
+        st.session_state.chat_history.append(("You", user_input))
+        st.session_state.chat_history.append((f"{role} - FactoryGPT", reply))
+
+    for speaker, msg in st.session_state.chat_history[-10:]:
+        st.markdown(f"**{speaker}:** {msg}")
+
 else:
-    st.info("👈 Upload a sensor log CSV file to explore digital twin analysis, waveform charts, anomaly detection, and PDF export.")
+    st.info("👈 Upload a sensor log CSV file to explore digital twin analysis, chatbot Q&A, waveform charts, anomaly detection, and PDF export.")
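Note: the assistant hunk above reuses EMBED_MODEL, GEN_MODEL, st.session_state.chunks and st.session_state.embeddings, which are created in the unchanged part of app.py that this diff does not show. A minimal sketch of how that setup is typically wired, assuming sentence-transformers for the embedder and a transformers text2text pipeline for generation (the model names and helper below are placeholders, not necessarily what the Space uses):

# Sketch only: assumed initialization for names referenced in the diff
# (EMBED_MODEL, GEN_MODEL, st.session_state.chunks, st.session_state.embeddings).
import numpy as np
import streamlit as st
from sentence_transformers import SentenceTransformer
from transformers import pipeline

@st.cache_resource
def load_models():
    # Placeholder model choices; the actual app may use different checkpoints.
    embed = SentenceTransformer("all-MiniLM-L6-v2")            # provides .encode()
    gen = pipeline("text2text-generation", model="google/flan-t5-base")
    return embed, gen

EMBED_MODEL, GEN_MODEL = load_models()

def index_sensor_log(df, chunk_rows=20):
    # Split the uploaded log into text chunks and embed them once per upload.
    chunks = [df.iloc[i:i + chunk_rows].to_string() for i in range(0, len(df), chunk_rows)]
    st.session_state.chunks = chunks
    st.session_state.embeddings = np.array(EMBED_MODEL.encode(chunks))

With the embeddings stored as a 2-D array, np.dot(st.session_state.embeddings, query_vec) in the diff yields one similarity score per chunk, and np.argsort(sims)[-3:][::-1] keeps the three most relevant chunks for the prompt.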
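Similarly, the download link in the last hunk interpolates a b64 string produced in unchanged code. A sketch of that pattern, assuming the classic PyFPDF API (fpdf2's output() returns bytes directly, so the encode step would differ):

# Sketch only: how the b64 payload for the PDF download link is typically produced.
import base64
from fpdf import FPDF

def build_pdf_link(twin_report: str) -> str:
    # Render the digital-twin summary text into a one-page PDF.
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=11)
    pdf.multi_cell(0, 8, twin_report)
    # Classic PyFPDF returns a latin-1 str from output(dest="S"); fpdf2 returns bytes.
    pdf_bytes = pdf.output(dest="S").encode("latin-1")
    b64 = base64.b64encode(pdf_bytes).decode()
    return (f'<a href="data:application/octet-stream;base64,{b64}" '
            f'download="digital_twin_report.pdf">📄 Download PDF Report</a>')

Streamlit then renders the returned anchor tag with st.markdown(href, unsafe_allow_html=True), exactly as in the diff.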