import os
from crewai import Agent, Task, Crew
from langchain_groq import ChatGroq
import streamlit as st
from PIL import Image
import base64
from io import BytesIO
import pandas as pd # Import pandas for handling data in tabular format
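# Assumed dependencies (versions are not pinned in the original): crewai,
# langchain-groq, streamlit, pillow, pandas.
# Assumption: the Groq API key is supplied through the GROQ_API_KEY environment
# variable (e.g. `export GROQ_API_KEY=...`) instead of being hard-coded below.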
# Initialize the LLM for the Doctor Assistant
llm = ChatGroq(
    groq_api_key=os.environ.get("GROQ_API_KEY"),  # read from the environment; never commit secrets to source
    model_name="llama3-70b-8192",
)
# Define the Doctor Assistant with a diagnostic goal
doctor_assistant = Agent(
    role='Doctor Assistant',
    goal='Collect detailed health information dynamically through a series of questions based on user responses.',
    backstory=(
        "You are a virtual doctor assistant who asks diagnostic questions based on user responses. "
        "Your role is to gather health information before the user’s doctor consultation, adapting your questions as needed."
    ),
    verbose=True,
    llm=llm,
)
# Function to process user response and generate the next question
def get_next_question(response):
    # Define the task for generating the next question based on the user's response
    task_description = f"Generate the next diagnostic question based on the user's response: '{response}'"
    # Set up the task for the assistant to generate a follow-up question
    follow_up_task = Task(
        description=task_description,
        agent=doctor_assistant,
        human_input=False,
        expected_output="A contextually relevant follow-up question based on user response",
    )
    # Instantiate the crew and execute the task to get the next question
    crew = Crew(
        agents=[doctor_assistant],
        tasks=[follow_up_task],
        verbose=2,
    )
    result = crew.kickoff()
    return result
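# Note (assumption about the installed crewai version): newer crewai releases
# return a CrewOutput object from crew.kickoff() (convert with str() before
# passing it to st.markdown) and expect a boolean for Crew(verbose=...). The
# function above keeps the original int/raw-return usage from older releases.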
# Load the image from the specified path
image_path = "./image-removebg-preview (2).png" # Adjust path if necessary
image = Image.open(image_path)
image = image.resize((300, 300))
# Convert image to base64 and display it
buffered = BytesIO()
image.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode()
st.markdown("<h1 style='text-align: center;'>Doctor Assistant Chatbot</h1>", unsafe_allow_html=True)
st.markdown(f"<div style='text-align: center;'><img src='data:image/png;base64,{img_str}' width='300' height='300'/></div>", unsafe_allow_html=True)
# Initialize session states for storing conversation history and user details
if "conversation" not in st.session_state:
st.session_state.conversation = []
if "user_details" not in st.session_state:
st.session_state.user_details = {}
# Display the conversation history
for turn in st.session_state.conversation:
    role, content = turn
    with st.chat_message(role):
        st.markdown(content)
# Function to generate a concise report
def generate_report():
    # Prepare patient detail data with key information only
    patient_details = {
        "Patient Name": st.session_state.user_details.get("name", 'N/A'),
        "Patient Age": st.session_state.user_details.get("age", 'N/A'),
        "Patient Gender": st.session_state.user_details.get("gender", 'N/A'),
        "Patient Phone Number": st.session_state.user_details.get("phone_number", 'N/A'),  # falls back to N/A; the form does not collect a phone number
    }
    # Create a DataFrame for patient details
    details_df = pd.DataFrame.from_dict(patient_details, orient='index', columns=['Value'])
    # Collect the user's responses as a symptom summary
    symptoms_summary = []
    for turn in st.session_state.conversation:
        role, content = turn
        if role == "user":
            # The raw user messages are used as-is; no keyword extraction is performed
            symptoms_summary.append(content)
    # Keep only unique responses
    unique_symptoms = list(set(symptoms_summary))
    # Prepare the symptom list for display
    symptoms_df = pd.DataFrame(unique_symptoms, columns=["Main Symptoms"])
    return details_df, symptoms_df
# Initial input for user details
if not st.session_state.user_details:
    name = st.text_input("Please enter your name:")
    age = st.text_input("Please enter your age:")
    gender = st.selectbox("Please select your gender:", ["Male", "Female", "Other"])
    # Store user details in session state
    if st.button("Submit Details"):
        if name and age and gender:
            st.session_state.user_details = {"name": name, "age": age, "gender": gender}
            initial_question = "Thank you! Now, could you tell me what symptoms you're experiencing?"
            st.session_state.conversation.append(("assistant", initial_question))
            with st.chat_message("assistant"):
                st.markdown(initial_question)
        else:
            st.warning("Please fill out all fields.")
# Check for user's response and generate the next question
if user_response := st.chat_input("Your response:"):
    # Check for an end-of-conversation keyword
    if user_response.lower() in ["finish", "done", "end"]:
        st.session_state.conversation.append(("user", user_response))
        with st.chat_message("user"):
            st.markdown(user_response)
        # Generate and display the final report in table format
        details_df, symptoms_df = generate_report()
        st.markdown("### Patient Details")
        st.table(details_df)
        st.markdown("### Main Symptoms")
        st.table(symptoms_df)
        st.markdown("Thank you for your responses! The consultation has ended. Take care!", unsafe_allow_html=True)
        st.stop()  # Stop the app from proceeding further
    # Append the user's response to the conversation
    st.session_state.conversation.append(("user", user_response))
    # Display the user's response immediately
    with st.chat_message("user"):
        st.markdown(user_response)
    # Generate the next question based on the user's response
    with st.spinner("Processing..."):
        next_question = get_next_question(user_response)
    # Append the assistant's next question to the conversation
    st.session_state.conversation.append(("assistant", next_question))
    # Display the assistant's response
    with st.chat_message("assistant"):
        st.markdown(next_question)
if st.button("Generate Report"):
details_df, symptoms_df = generate_report()
# Display patient details
st.markdown("### Patient Details")
st.table(details_df)
st.markdown("### Main Symptoms")
st.table(symptoms_df)
# Add an end line
st.markdown("---")