# FGPT / app.py
# (Hugging Face Hub page residue, preserved as comments so the file parses:
#  uploaded by bhagwandas — "Update app.py" — commit 6141da1 (verified)
#  raw / history / blame — 2.1 kB)
import streamlit as st
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline
# Page setup
st.set_page_config(page_title="FactoryRAG - Upload Logs", layout="wide")
st.title("🏭 FactoryRAG: Human-Centric AI for Sensor Log Analysis")


# Load models once per server process. Streamlit re-executes this script on
# every widget interaction, so without caching both models would be rebuilt
# (and potentially re-downloaded) on each rerun.
@st.cache_resource
def _load_models():
    """Return (sentence embedder, text2text generator), cached for the app's lifetime."""
    embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    generator = pipeline('text2text-generation', model='google/flan-t5-base')
    return embedder, generator


EMBED_MODEL, GEN_MODEL = _load_models()
# File uploader
uploaded_file = st.sidebar.file_uploader("πŸ“‚ Upload your sensor CSV log file", type=["csv"])


def convert_to_chunks(df):
    """Render each dataframe row as one 'log entry' sentence for retrieval.

    Numeric values are formatted to two decimals; non-numeric values
    (timestamps, labels, machine ids, ...) are included as-is instead of
    crashing on the ':.2f' format spec.
    """
    chunks = []
    for idx, row in df.iterrows():
        parts = []
        for col in df.columns:
            val = row[col]
            if isinstance(val, (int, float, np.number)):
                parts.append(f"{col}: {val:.2f}")
            else:
                parts.append(f"{col}: {val}")
        chunks.append(f"Log entry {idx}: " + ", ".join(parts))
    return chunks


if uploaded_file:
    df = pd.read_csv(uploaded_file)
    st.success("βœ… File uploaded and loaded!")
    st.write("πŸ“Š Sensor Data Snapshot:", df.head())

    # (Re)build the retrieval index, keyed on the uploaded file's name so a
    # NEW upload replaces the old chunks/embeddings instead of silently
    # answering from the previous file's stale index.
    if st.session_state.get('indexed_file') != uploaded_file.name:
        st.session_state.chunks = convert_to_chunks(df)
        st.session_state.embeddings = EMBED_MODEL.encode(st.session_state.chunks)
        st.session_state.indexed_file = uploaded_file.name

    # User query
    query = st.text_input("πŸ” Ask something about the sensor logs:")
    if query:
        query_vec = EMBED_MODEL.encode([query])[0]
        # Dot-product relevance scores against every log-entry embedding;
        # keep the top 3, most similar first.
        scores = np.dot(st.session_state.embeddings, query_vec)
        top_idxs = np.argsort(scores)[-3:][::-1]
        context = "\n".join([st.session_state.chunks[i] for i in top_idxs])
        prompt = f"Answer based on the following logs:\n{context}\n\nQuestion: {query}"
        response = GEN_MODEL(prompt, max_length=256)[0]['generated_text']
        st.subheader("πŸ€– FactoryGPT Answer")
        st.write(response)
        st.markdown("### πŸ§‘β€πŸ­ Human Feedback")
        st.radio("Is this answer acceptable?", ["Approve", "Correct", "Escalate"], horizontal=True)
        with st.expander("πŸ“„ Retrieved Log Context"):
            st.code(context)
else:
    st.info("πŸ‘ˆ Please upload your sensor log file (CSV) to begin.")