|
import os |
|
from crewai import Agent, Task, Crew |
|
from langchain_groq import ChatGroq |
|
import streamlit as st |
|
from PIL import Image |
|
|
|
|
|
# Groq-hosted Llama 3 70B model shared by every agent in this app.
# SECURITY: the API key must come from the environment — a key hardcoded in
# source ends up in version control and must be considered leaked/revoked.
# Export GROQ_API_KEY before launching the app; KeyError here means it is unset.
llm = ChatGroq(
    groq_api_key=os.environ["GROQ_API_KEY"],
    model_name="llama3-70b-8192",
)
|
|
|
|
|
# Single research agent used by the chatbot.  CrewAI composes the agent's
# system prompt from the role / goal / backstory strings below.
einstein_agent = Agent(

    role='Einstein Agent',

    goal='Provide in-depth answers and insights on various topics to help with research questions.',

    backstory=(

        "You are an Einstein Agent, skilled in gathering and synthesizing information across domains. Mainly in Physics. "

        "Your role is to answer questions with a detailed and analytical approach."

    ),

    # Log the agent's intermediate reasoning steps to the console.
    verbose=True,

    # The shared Groq LLM configured above.
    llm=llm,

)
|
|
|
def process_question_with_agent(question, agent=None):
    """Run a single research question through a one-agent CrewAI crew.

    Args:
        question: The user's research question, interpolated into the task
            description.
        agent: Optional CrewAI ``Agent`` to run the task with.  Defaults to
            the module-level ``einstein_agent``, so existing callers are
            unaffected.

    Returns:
        The result of ``crew.kickoff()`` — the agent's final answer.
    """
    # Backward-compatible default: fall back to the module-level agent.
    if agent is None:
        agent = einstein_agent

    task_description = f"Research and provide a detailed answer to the question: '{question}'"

    research_task = Task(
        description=task_description,
        agent=agent,
        human_input=False,  # fully automated — no human-in-the-loop step
        expected_output="According to user need response to the question",
    )

    crew = Crew(
        agents=[agent],
        tasks=[research_task],
        # NOTE(review): newer crewai releases type `verbose` as bool — confirm
        # the installed version still accepts the integer log level 2.
        verbose=2,
    )

    # kickoff() executes the task synchronously and returns the final output.
    return crew.kickoff()
|
|
|
|
|
# --- Page header and centered logo -----------------------------------------
# Imports hoisted to the top of this section (they were previously buried
# mid-block, against PEP 8 convention).
import base64
from io import BytesIO

# NOTE(review): hardcoded relative asset path — Image.open raises
# FileNotFoundError at startup if the file is missing or the working
# directory differs; confirm the deployment layout ships this file.
image_path = "./image-removebg-preview (1).png"
image = Image.open(image_path)

# Normalize the logo to a fixed 300x300 size.
image = image.resize((300, 300))

st.markdown("<h1 style='text-align: center;'>Einstein Researcher Chatbot</h1>", unsafe_allow_html=True)

# Embed the logo as a base64 data URI inside raw HTML so it can be centered
# (st.image offers no direct horizontal-centering option).
buffered = BytesIO()
image.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode()

st.markdown(f"<div style='text-align: center;'><img src='data:image/png;base64,{img_str}' width='300' height='300'/></div>", unsafe_allow_html=True)
|
|
|
|
|
# Create the conversation history on the first page load; Streamlit reruns
# the whole script on every interaction, so session_state is the only place
# the history survives.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay every stored turn so the full conversation is visible after a rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
|
|
|
|
|
# Handle a freshly submitted question from the chat box.
user_query = st.chat_input("Ask a research question:")
if user_query:
    # Echo the question immediately and persist it in the history.
    st.chat_message("user").markdown(user_query)
    st.session_state.messages.append({"role": "user", "content": user_query})

    # Run the CrewAI agent while showing a progress spinner.
    with st.spinner("Processing..."):
        answer = process_question_with_agent(user_query)

    # Render the agent's reply and store it so it survives future reruns.
    with st.chat_message("assistant"):
        st.markdown(answer)

    st.session_state.messages.append({"role": "assistant", "content": answer})
|
|