Update app.py
app.py CHANGED
@@ -1,144 +1,144 @@
 import os
 import json
 import streamlit as st
 from groq import Groq
 from PIL import Image, UnidentifiedImageError, ExifTags
 import requests
 from io import BytesIO
 from transformers import pipeline
 from final_captioner import generate_final_caption
 import hashlib

 # Streamlit page title
 st.title("PicSamvaad : Image Conversational Chatbot")

 # # Load configuration
 # working_dir = os.path.dirname(os.path.abspath(__file__))
 # config_data = json.load(open(f"{working_dir}/config.json"))

 # GROQ_API_KEY = config_data["GROQ_API_KEY"]

 # Save the API key to environment variable
-os.environ["GROQ_API_KEY"] =
+os.environ["GROQ_API_KEY"] = "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"

 client = Groq()

 # Sidebar for image upload and URL input
 with st.sidebar:
     st.header("Upload Image or Enter URL")

     uploaded_file = st.file_uploader(
         "Upload an image to chat...", type=["jpg", "jpeg", "png"]
     )
     url = st.text_input("Or enter a valid image URL...")

 image = None
 error_message = None


 def correct_image_orientation(img):
     try:
         for orientation in ExifTags.TAGS.keys():
             if ExifTags.TAGS[orientation] == "Orientation":
                 break
         exif = img._getexif()
         if exif is not None:
             orientation = exif[orientation]
             if orientation == 3:
                 img = img.rotate(180, expand=True)
             elif orientation == 6:
                 img = img.rotate(270, expand=True)
             elif orientation == 8:
                 img = img.rotate(90, expand=True)
     except (AttributeError, KeyError, IndexError):
         pass
     return img


 def get_image_hash(image):
     # Generate a unique hash for the image
     img_bytes = image.tobytes()
     return hashlib.md5(img_bytes).hexdigest()


 # Check if a new image or URL has been provided and reset chat history
 if "last_uploaded_hash" not in st.session_state:
     st.session_state.last_uploaded_hash = None

 if uploaded_file is not None:
     image = Image.open(uploaded_file)
     image_hash = get_image_hash(image)

     if st.session_state.last_uploaded_hash != image_hash:
         st.session_state.chat_history = []  # Clear chat history
         st.session_state.last_uploaded_hash = image_hash  # Update last uploaded hash

     image = correct_image_orientation(image)
     st.image(image, caption="Uploaded Image.", use_column_width=True)

 elif url:
     try:
         response = requests.get(url)
         response.raise_for_status()  # Check if the request was successful
         image = Image.open(BytesIO(response.content))
         image_hash = get_image_hash(image)

         if st.session_state.last_uploaded_hash != image_hash:
             st.session_state.chat_history = []  # Clear chat history
             st.session_state.last_uploaded_hash = (
                 image_hash  # Update last uploaded hash
             )

         image = correct_image_orientation(image)
         st.image(image, caption="Image from URL.", use_column_width=True)
     except (requests.exceptions.RequestException, UnidentifiedImageError) as e:
         image = None
         error_message = "Error: The provided URL is invalid or the image could not be loaded. Sometimes some image URLs don't work. We suggest you upload the downloaded image instead ;)"

 caption = ""
 if image is not None:
     caption += generate_final_caption(image)
     st.write("ChatBot : " + caption)

 # Display error message if any
 if error_message:
     st.error(error_message)

 # Initialize chat history in Streamlit session state if not present already
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []

 # Display chat history
 for message in st.session_state.chat_history:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])

 # Input field for user's message
 user_prompt = st.chat_input("Ask the Chatbot about the image...")

 if user_prompt:
     st.chat_message("user").markdown(user_prompt)
     st.session_state.chat_history.append({"role": "user", "content": user_prompt})

     # Send user's message to the LLM and get a response
     messages = [
         {
             "role": "system",
             "content": "You are a helpful, accurate image conversational assistant. You don't hallucinate, and your answers are very precise and have a positive approach.The caption of the image is: "
             + caption,
         },
         *st.session_state.chat_history,
     ]

     response = client.chat.completions.create(
         model="llama-3.1-8b-instant", messages=messages
     )

     assistant_response = response.choices[0].message.content
     st.session_state.chat_history.append(
         {"role": "assistant", "content": assistant_response}
     )

     # Display the LLM's response
     with st.chat_message("assistant"):
         st.markdown(assistant_response)