mdik1 committed
Commit 199c651 · verified · 1 Parent(s): 9de9352

Upload 4 files

Files changed (4)
  1. README.md +12 -12
  2. app.py +95 -0
  3. image-removebg-preview (1).png +0 -0
  4. requirements.txt +4 -0
README.md CHANGED
@@ -1,12 +1,12 @@
- ---
- title: Einstein
- emoji: 💻
- colorFrom: red
- colorTo: pink
- sdk: streamlit
- sdk_version: 1.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Einstein
+ emoji: 💻
+ colorFrom: red
+ colorTo: pink
+ sdk: streamlit
+ sdk_version: 1.39.0
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,95 @@
+ import os
+ from crewai import Agent, Task, Crew
+ from langchain_groq import ChatGroq
+ import streamlit as st
+ from PIL import Image
+
+ # Initialize the LLM for the Einstein Agent
+ llm = ChatGroq(
+     groq_api_key=os.getenv("GROQ_API_KEY"),  # read the Groq API key from the environment (e.g. a Space secret) rather than hardcoding it
+     model_name="llama3-70b-8192",  # Groq-hosted Llama 3 70B model
+ )
+
+ # Define the Einstein Agent with a research-oriented goal
+ einstein_agent = Agent(
+     role='Einstein Agent',
+     goal='Provide in-depth answers and insights on various topics to help with research questions.',
+     backstory=(
+         "You are an Einstein Agent, skilled in gathering and synthesizing information across domains, mainly in physics. "
+         "Your role is to answer questions with a detailed and analytical approach."
+     ),
+     verbose=True,
+     llm=llm,
+ )
+
+ def process_question_with_agent(question):
+     # Describe the task for the agent
+     task_description = f"Research and provide a detailed answer to the question: '{question}'"
+
+     # Define the task for the agent to generate a response to the question
+     research_task = Task(
+         description=task_description,
+         agent=einstein_agent,
+         human_input=False,
+         expected_output="A detailed, well-reasoned answer to the user's question",
+     )
+
+     # Instantiate the crew with the defined agent and task
+     crew = Crew(
+         agents=[einstein_agent],
+         tasks=[research_task],
+         verbose=2,
+     )
+
+     # Get the crew to work on the task and return the result
+     result = crew.kickoff()
+
+     return result
+
+ # Load the image bundled with the Space
+ image_path = "image-removebg-preview (1).png"  # uploaded alongside app.py
+ image = Image.open(image_path)
+
+ # Resize the image to 300x300
+ image = image.resize((300, 300))
+
+ # Set the title of the app with Markdown
+ st.markdown("<h1 style='text-align: center;'>Einstein Researcher Chatbot</h1>", unsafe_allow_html=True)
+
+ # Convert the image to base64 for embedding in HTML
+ import base64
+ from io import BytesIO
+
+ buffered = BytesIO()
+ image.save(buffered, format="PNG")
+ img_str = base64.b64encode(buffered.getvalue()).decode()
+
+ # Display the image and center it using HTML
+ st.markdown(f"<div style='text-align: center;'><img src='data:image/png;base64,{img_str}' width='300' height='300'/></div>", unsafe_allow_html=True)
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # React to user input
+ if prompt := st.chat_input("Ask a research question:"):
+     # Display the user message in a chat message container
+     st.chat_message("user").markdown(prompt)
+     # Add the user message to the chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     # Get the response from the Einstein Agent
+     with st.spinner("Processing..."):
+         response = process_question_with_agent(prompt)
+
+     # Display the assistant response in a chat message container
+     with st.chat_message("assistant"):
+         st.markdown(response)
+
+     # Add the assistant response to the chat history
+     st.session_state.messages.append({"role": "assistant", "content": response})
image-removebg-preview (1).png ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ crewai
+ langchain_groq
+ streamlit
+ Pillow