Anustup committed on
Commit 98aea07 · verified · 1 Parent(s): b43a1bb

Update app.py

Files changed (1):
  1. app.py +190 -235
app.py CHANGED
@@ -1,5 +1,8 @@
import streamlit as st
import os
from prompts import prompts
from constants import JSON_SCHEMA_FOR_GPT, UPDATED_MODEL_ONLY_SCHEMA, JSON_SCHEMA_FOR_LOC_ONLY
from gpt import runAssistant, checkRunStatus, retrieveThread, createAssistant, saveFileOpenAI, startAssistantThread, \
@@ -9,6 +12,9 @@ from theme import flux_generated_image, flux_generated_image_seed
import time
from PIL import Image
import io


def process_run(st, thread_id, assistant_id):
@@ -26,6 +32,28 @@ def process_run(st, thread_id, assistant_id):
        pass


def page1():
    st.title("Upload Product")
    st.markdown("<h2 style='color:#FF5733; font-weight:bold;'>Add a Product</h2>", unsafe_allow_html=True)
@@ -79,54 +107,141 @@ def page2():


def page3():
    st.title("Scene Suggestions")
    st.write("Based on your uploaded product and references!")
    feedback = st.chat_input("Provide feedback:")
    if not st.session_state.get("assistant_initialized", False):
-        assistant_id = createAssistant("You are a helpful assistant who is an expert in Fashion Shoots.")
-        updated_prompt = prompts["IDEA_GENERATION_PROMPT"].format(
-            brand_details=st.session_state["brand_summary"],
-            product_details=st.session_state["product_info"],
-            type_of_shoot=st.session_state["shoot_type"],
-            json_schema=JSON_SCHEMA_FOR_GPT,
-            product_name=st.session_state["product_description"]
-        )
-        file_locations = []
        for uploaded_file in st.session_state['uploaded_files']:
            bytes_data = uploaded_file.getvalue()
            image = Image.open(io.BytesIO(bytes_data))
            image.verify()
-            location = f"temp_image_{uploaded_file.name}"
            with open(location, "wb") as f:
                f.write(bytes_data)
-            file_locations.append(location)
            image.close()
        for uploaded_file in st.session_state['reference_images']:
            bytes_data = uploaded_file.getvalue()
            image = Image.open(io.BytesIO(bytes_data))
            image.verify()
-            location = f"temp2_image_{uploaded_file.name}"
            with open(location, "wb") as f:
                f.write(bytes_data)
-            file_locations.append(location)
            image.close()
-        file_ids = [saveFileOpenAI(location) for location in file_locations]
-        thread_id = startAssistantThread(file_ids, updated_prompt, "yes", "yes")
-        st.session_state.assistant_id = assistant_id
-        st.session_state.thread_id = thread_id
        st.session_state.assistant_initialized = True
-        regenerate_images(thread_id, assistant_id)
    if feedback:
        if 'images' in st.session_state and 'descriptions' in st.session_state:
            for image_path in st.session_state['images']:
                os.remove(image_path)
            del st.session_state['images']
            del st.session_state['descriptions']
-            del st.session_state["json_descriptions"]
-        addMessageToThread(st.session_state.thread_id, feedback)
-        regenerate_images(st.session_state.thread_id, st.session_state.assistant_id)
    selected_image_index = None
-    cols = st.columns(1)
    for i in range(len(st.session_state["images"])):
        with cols[i]:
            st.image(st.session_state.images[i], caption=st.session_state.descriptions[i], use_column_width=True)
@@ -141,62 +256,10 @@ def page3():
            st.session_state.page = "Page 2"


-def regenerate_images(thread_id, assistant_id):
-    """Helper function to generate images and descriptions."""
-    response_from_process_list = []
-    for _ in range(1): # Assuming you generate 1 set of image/description
-        response_from_process = process_run(st, thread_id, assistant_id)
-        response_from_process_list.append(response_from_process)
-
-    summary_list = []
-    for final_response in response_from_process_list:
-        prompt_for_idea_summary = prompts["IDEA_SUMMARY_PROMPT"].format(
-            json_schema=str(final_response)
-        )
-        summary = create_chat_completion_request_open_ai_for_summary(prompt_for_idea_summary, "No")
-        summary_list.append(summary)
-
-    # Generate images based on the summaries
-    flux_generated_theme_image = []
-    for summary in summary_list:
-        theme_image = flux_generated_image(summary)
-        flux_generated_theme_image.append(theme_image["file_name"])
-
-    # Save the new images and descriptions in session state
-    st.session_state["images"] = flux_generated_theme_image
-    st.session_state["descriptions"] = summary_list
-    st.session_state["json_descriptions"] = response_from_process_list
-
-
def page4():
    import json
-    selected_theme_text_by_user = st.session_state.json_descriptions[st.session_state.selected_image_index]
    print(selected_theme_text_by_user)
-    schema_for_model_bg = {"type": "object",
-                           "properties": {
-                               "Model": {
-                                   "type": "string",
-                                   "description": "The model name or identifier."
-                               },
-                               "Background": {
-                                   "type": "string",
-                                   "description": "Description or type of the background."
-                               }},
-                           "required": ["Model", "Background"],
-                           "additionalProperties": False
-                           }
-    session_state_desp = st.session_state["json_descriptions"]
-    prompt_to_get_details = (f"You are provided with a brief of a Fashion Shoot : "
-                             f"{session_state_desp}.\n Now provide me a JSON which will"
-                             f"have two keys ```Model``` and ```Background```. Provide all detail's"
-                             f"present about model and background in the brief provided by you. Just provide a "
-                             f"natural langauge description. I will use it as description of model and "
-                             f"background needed by the brand Output JSON following the schema")
-    response_from_open_ai = create_chat_completion_request_open_ai_for_summary(prompt_to_get_details,
-                                                                               schema_name="model_bg",
-                                                                               json_schema=schema_for_model_bg,
-                                                                               json_mode="yes")
-    json_response_from_open_ai = json.loads(response_from_open_ai)
    with (st.sidebar):
        st.title(st.session_state["product_info"])
        st.write("Product Image")
@@ -213,183 +276,74 @@ def page4():

    else:
        seed_number = 0
        st.text("Thanks will take care")
-    model_preference = st.selectbox(
-        "Model Preference",
-        ("Create Own/Edit Pre-filled", "Ideas", "Upload Reference"),
-    )
-    if model_preference == "Create Own/Edit Pre-filled":
-        pre_filled_model_details = st.text_area("Model Idea", value=json_response_from_open_ai["Model"],
-                                                key="Model Idea")
-    elif model_preference == "Ideas":
-        prompt_to_generate_idea = ("Your task is to create model ideas for shoot of a product of a brand. "
-                                   "The details about the brand: ```{brand_details}.\n The product: {product_name},"
-                                   "which is: ```{product_details}```.\n Reference images for the product and "
-                                   "brands shoot idea is already provided with you. Additionally brand wants to "
-                                   "have a ```{type_of_shoot}``` of the model. Now based on all provided details, "
-                                   "think step by step and provide your ideas about what type of model the brand"
-                                   "should need based on mentioned JSON format. Also provide a combined prompt "
-                                   "which the brand will use to create a shoot image. While creating the "
-                                   "combined prompt as mentioned in the JSON schema, do not miss any details you"
-                                   " mentioned in the JSON.")
-        updated_model_idea_gen_prompt = prompt_to_generate_idea.format(
-            brand_details=st.session_state["brand_summary"],
-            product_details=st.session_state["product_info"],
-            type_of_shoot=st.session_state["shoot_type"],
-            product_name=st.session_state["product_description"]
-
-        )
-        response_for_only_model = create_chat_completion_request_open_ai_for_summary(updated_model_idea_gen_prompt
-                                                                                     , schema_name="model_only",
-                                                                                     json_schema=
-                                                                                     UPDATED_MODEL_ONLY_SCHEMA,
-                                                                                     json_mode="yes")
-        pre_filled_model_details = st.text_area("Model Idea", value=response_for_only_model,
-                                                key="Model Idea")
-    else:
-        uploaded_files = st.file_uploader("Upload one Model Reference Image here",
-                                          accept_multiple_files=False, key="uploader")
-        bytes_data = uploaded_files.getvalue()
-        image = Image.open(io.BytesIO(bytes_data))
-        image.verify()
-        location = f"temp_image_{uploaded_files.name}"
-        with open(location, "wb") as f:
-            f.write(bytes_data)
-        image.close()
-        prompt_to_generate_idea = ("Follow this JSON Schema : {json_schema_model_only}."
-                                   "Your task is to create model ideas for shoot of a product of a brand. "
-                                   "The details about the brand: ```{brand_details}.\n The product: {product_name},"
-                                   "which is: ```{product_details}```.\n Reference images for the product and "
-                                   "brands shoot idea is already provided with you. Additionally brand wants to "
-                                   "have a ```{type_of_shoot}``` of the model. Now based on all provided details, "
-                                   "think step by step and provide your ideas about what type of model the brand"
-                                   "should need based on mentioned JSON format. Also provide a combined prompt "
-                                   "which the brand will use to create a shoot image. While creating the "
-                                   "combined prompt as mentioned in the JSON schema, do not miss any details you"
-                                   " mentioned in the JSON.")
-        updated_model_idea_gen_prompt = prompt_to_generate_idea.format(
-            json_schema_model_only=UPDATED_MODEL_ONLY_SCHEMA,
-            brand_details=st.session_state["brand_summary"],
-            product_details=st.session_state["product_info"],
-            type_of_shoot=st.session_state["shoot_type"],
-            product_name=st.session_state["product_description"]
-
-        )
-        json_response = create_image_completion_request_gpt(location, updated_model_idea_gen_prompt)
-        pre_filled_model_details = st.text_area("Model Idea", value=json_response,
-                                                key="Model Idea")
-    background_preference = st.selectbox(
-        "Background Preference",
-        ("Create Own/Edit Pre-filled", "Ideas", "Upload Reference"),
-    )
-    if background_preference == "Create Own/Edit Pre-filled":
-        pre_filled_background_details = st.text_area("Background Idea",
-                                                     value=json_response_from_open_ai["Background"],
-                                                     key="Background Idea")
-    elif background_preference == "Ideas":
-        prompt_to_generate_idea = ("Follow this JSON Schema : {json_schema_background_only}."
-                                   "Your task is to create location/background ideas for shoot of a "
-                                   "product of a brand. "
-                                   "The details about the brand: ```{brand_details}.\n The product: {product_name},"
-                                   "which is: ```{product_details}```.\n Reference images for the product and "
-                                   "brands shoot idea is already provided with you. Additionally brand wants to "
-                                   "have a ```{type_of_shoot}``` of the model. Now based on all provided details, "
-                                   "think step by step and provide your ideas about what type of location the brand"
-                                   "should need based on mentioned JSON format. Also provide a combined prompt "
-                                   "which the brand will use to create a shoot image. While creating the "
-                                   "combined prompt as mentioned in the JSON schema, do not miss any details you"
-                                   " mentioned in the JSON.")
-        updated_bg_idea_gen_prompt = prompt_to_generate_idea.format(
-            json_schema_background_only=JSON_SCHEMA_FOR_LOC_ONLY,
-            brand_details=st.session_state["brand_summary"],
-            product_details=st.session_state["product_info"],
-            type_of_shoot=st.session_state["shoot_type"],
-            product_name=st.session_state["product_description"]
-
-        )
-        response_for_only_bg = create_chat_completion_request_open_ai_for_summary(updated_bg_idea_gen_prompt,
-                                                                                  schema_name="bg_o",
-                                                                                  json_schema=JSON_SCHEMA_FOR_LOC_ONLY,
-                                                                                  json_mode="yes")
-        pre_filled_background_details = st.text_area("Background Idea", value=response_for_only_bg,
-                                                     key="Background Idea")
-    else:
-        uploaded_files = st.file_uploader("Upload one Background Reference Image here",
-                                          accept_multiple_files=False, key="uploader")
-        bytes_data = uploaded_files.getvalue()
-        image = Image.open(io.BytesIO(bytes_data))
-        image.verify()
-        location = f"temp2_image_{uploaded_files.name}"
-        with open(location, "wb") as f:
-            f.write(bytes_data)
-        image.close()
-        prompt_to_generate_idea = ("Follow this JSON Schema : {json_schema_bg_only}."
-                                   "Your task is to create Background/Location ideas for shoot of a "
-                                   "product of a brand. "
-                                   "The details about the brand: ```{brand_details}.\n The product: {product_name},"
-                                   "which is: ```{product_details}```.\n Reference images for the product and "
-                                   "brands shoot idea is already provided with you. Additionally brand wants to "
-                                   "have a ```{type_of_shoot}``` of the model. Now based on all provided details, "
-                                   "think step by step and provide your ideas about what type of location the brand"
-                                   "should need based on mentioned JSON format. Also provide a combined prompt "
-                                   "which the brand will use to create a shoot image. While creating the "
-                                   "combined prompt as mentioned in the JSON schema, do not miss any details you"
-                                   " mentioned in the JSON.")
-        updated_bg_idea_gen_prompt = prompt_to_generate_idea.format(
-            json_schema_bg_only=JSON_SCHEMA_FOR_LOC_ONLY,
-            brand_details=st.session_state["brand_summary"],
-            product_details=st.session_state["product_info"],
-            type_of_shoot=st.session_state["shoot_type"],
-            product_name=st.session_state["product_description"]
-
-        )
-        json_response = create_image_completion_request_gpt(location, updated_bg_idea_gen_prompt)
-        pre_filled_background_details = st.text_area("Background Idea", value=json_response,
-                                                     key="Background Idea")
    start_chat = st.button("Start Chat")
    if "mood_chat_messages" not in st.session_state:
        st.session_state["mood_chat_messages"] = []
-    if seed and dimensions and model_preference and background_preference:
        if start_chat:
-            final_mood_board_image_prompt = prompts["FINAL_PROMPT_GENERATION"].format(
-                brand_details=st.session_state["brand_summary"],
-                product_details=st.session_state["product_info"],
-                type_of_shoot=st.session_state["shoot_type"],
-                product_name=st.session_state["product_description"],
-                model_details=pre_filled_model_details,
-                location_details=pre_filled_background_details,
-                theme_details=str(selected_theme_text_by_user),
-                chat_history=str(st.session_state["mood_chat_messages"])
-            )
-            prompt_for_flux_mood_board = create_chat_completion_request_open_ai_for_summary(
-                final_mood_board_image_prompt, "No", system_message=prompts["SYSTEM_PROMPT_FOR_MOOD_BOARD"])
            if seed == "Fixed":
-                generated_flux_image = flux_generated_image_seed(prompt_for_flux_mood_board, seed_number, dimensions)
            else:
-                generated_flux_image = flux_generated_image(prompt_for_flux_mood_board)
            st.session_state["mood_chat_messages"].append({
                "role": "AI",
-                "message": prompt_for_flux_mood_board,
                "image": generated_flux_image["file_name"]
            })
        # for message in st.session_state["mood_chat_messages"]:
-        # if message["role"] == "AI":
-        # st.write(f"Caimera AI: {message['message']}")
-        # st.image(message['image'])
-        #else:
-        # st.write(f"**You**: {message['message']}")
        user_input = st.chat_input("Type your message here...")
        if user_input:
            st.session_state["mood_chat_messages"].append({"role": "User", "message": user_input})
-            prompt_for_flux_mood_board_n = create_chat_completion_request_open_ai_for_summary(
-                user_input, "No", system_message=prompts["SYSTEM_PROMPT_FOR_MOOD_BOARD"])
            if seed == "Fixed":
-                generated_flux_image_n = flux_generated_image_seed(prompt_for_flux_mood_board_n, seed_number,
                                                                    dimensions)
            else:
-                generated_flux_image_n = flux_generated_image(prompt_for_flux_mood_board_n)
            st.session_state["mood_chat_messages"].append({
                "role": "AI",
-                "message": prompt_for_flux_mood_board_n,
                "image": generated_flux_image_n["file_name"]
            })
    for message in st.session_state["mood_chat_messages"]:
@@ -402,9 +356,10 @@ def page4():


if 'page' not in st.session_state:
-    st.session_state.page = "Page 1"

-# Routing between pages
if st.session_state.page == "Page 1":
    page1()
elif st.session_state.page == "Page 2":
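
The removed flow above (the hunks marked with -) drives everything through the OpenAI Assistants helpers in gpt.py: runAssistant starts a run, checkRunStatus is polled inside process_run, and retrieveThread pulls the final messages. For reference, here is a minimal sketch of that polling pattern, assuming those helpers wrap the OpenAI Assistants beta endpoints of the openai Python SDK; the name poll_assistant_run and the 2-second interval are illustrative, not taken from gpt.py.

    import time
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    def poll_assistant_run(thread_id: str, assistant_id: str, interval_s: float = 2.0) -> list:
        """Start a run on an existing thread, block until it finishes, return the assistant's text replies."""
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
        while run.status not in ("completed", "failed", "cancelled", "expired"):
            time.sleep(interval_s)
            run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
        messages = client.beta.threads.messages.list(thread_id=thread_id)  # newest messages first
        return [part.text.value
                for msg in messages.data if msg.role == "assistant"
                for part in msg.content if part.type == "text"]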
 
import streamlit as st
import os
+import re
+from claude import embed_base64_for_claude, create_claude_image_request_for_image_captioning, \
+    create_claude_request_for_text_completion, extract_data_from_text_xml
from prompts import prompts
from constants import JSON_SCHEMA_FOR_GPT, UPDATED_MODEL_ONLY_SCHEMA, JSON_SCHEMA_FOR_LOC_ONLY
from gpt import runAssistant, checkRunStatus, retrieveThread, createAssistant, saveFileOpenAI, startAssistantThread, \

import time
from PIL import Image
import io
+from streamlit_gsheets import GSheetsConnection
+
+conn = st.connection("gsheets", type=GSheetsConnection)


def process_run(st, thread_id, assistant_id):
 
        pass


+def page5():
+    st.title('Initialize your preferences!')
+    system_prompt_passed = st.text_area("System Prompt", value=prompts["PROMPT_FOR_MOOD_AND_IDEA"],
+                                        key="System Prompt")
+    caption_system_prompt = st.text_area("Captioning System Prompt", value=prompts["CAPTION_SYSTEM_PROMPT"],
+                                         key="Caption Generation System Prompt")
+    caption_prompt = st.text_area("Caption Prompt", value=prompts["CAPTION_PROMPT"],
+                                  key="Caption Generation Prompt")
+    st.text("Running on Claude")
+    col1, col2 = st.columns([1, 2])
+    with col1:
+        if st.button("Save the Prompt"):
+            st.session_state["system_prompt"] = system_prompt_passed
+            print(st.session_state["system_prompt"])
+            st.session_state["caption_system_prompt"] = caption_system_prompt
+            st.session_state["caption_prompt"] = caption_prompt
+            st.success("Saved your prompts")
+    with col2:
+        if st.button("Start Testing!"):
+            st.session_state['page'] = "Page 1"
+
+
def page1():
    st.title("Upload Product")
    st.markdown("<h2 style='color:#FF5733; font-weight:bold;'>Add a Product</h2>", unsafe_allow_html=True)
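
A note on the new module-level line conn = st.connection("gsheets", type=GSheetsConnection) added near the top of the file: the connection is created but not read from or written to in any of the hunks shown here. A minimal usage sketch, assuming the st-gsheets-connection package and a [connections.gsheets] block in .streamlit/secrets.toml; the worksheet name "prompts" is illustrative, not taken from this commit.

    import streamlit as st
    from streamlit_gsheets import GSheetsConnection

    conn = st.connection("gsheets", type=GSheetsConnection)
    df = conn.read(worksheet="prompts", ttl=0)  # returns a pandas DataFrame; ttl=0 disables result caching
    st.dataframe(df)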
 

def page3():
+    import random
    st.title("Scene Suggestions")
    st.write("Based on your uploaded product and references!")
    feedback = st.chat_input("Provide feedback:")
    if not st.session_state.get("assistant_initialized", False):
+        file_locations_for_product = []
        for uploaded_file in st.session_state['uploaded_files']:
            bytes_data = uploaded_file.getvalue()
            image = Image.open(io.BytesIO(bytes_data))
            image.verify()
+            location = f"temp_image_{random.randint(1, 100000000)}.png"
            with open(location, "wb") as f:
                f.write(bytes_data)
+            file_locations_for_product.append(location)
            image.close()
+        file_base64_embeds_product = [embed_base64_for_claude(location) for location in file_locations_for_product]
+        caption_list_from_claude_product = []
+        for file_embeds_base64 in file_base64_embeds_product:
+            caption_from_claude = create_claude_image_request_for_image_captioning(
+                st.session_state["caption_system_prompt"], st.session_state["caption_prompt"], file_embeds_base64)
+            caption_list_from_claude_product.append(caption_from_claude)
+        string_caption_list_product = str(caption_list_from_claude_product)
+        file_locations_for_others = []
        for uploaded_file in st.session_state['reference_images']:
            bytes_data = uploaded_file.getvalue()
            image = Image.open(io.BytesIO(bytes_data))
            image.verify()
+            location = f"temp2_image_{random.randint(1, 1000000)}.png"
            with open(location, "wb") as f:
                f.write(bytes_data)
+            file_locations_for_others.append(location)
            image.close()
+        file_base64_embeds = [embed_base64_for_claude(location) for location in file_locations_for_others]
        st.session_state.assistant_initialized = True
+        caption_list_from_claude = []
+        for file_embeds_base64 in file_base64_embeds:
+            caption_from_claude = create_claude_image_request_for_image_captioning(
+                st.session_state["caption_system_prompt"], st.session_state["caption_prompt"], file_embeds_base64)
+            caption_list_from_claude.append(caption_from_claude)
+        string_caption_list = str(caption_list_from_claude)
+        st.session_state["caption_product"] = string_caption_list_product
+        st.session_state["additional_caption"] = string_caption_list
+        additional_info_param_for_prompt = f"Brand have provided reference images whose details are:" \
+                                           f"```{string_caption_list}```. Apart from this brand needs" \
+                                           f"{st.session_state['shoot_type']}"
+        product_info = str(string_caption_list_product) + st.session_state['product_info']
+        updated_prompt_for_claude = st.session_state["system_prompt"].format(
+            BRAND_DETAILS=st.session_state['brand_summary'],
+            PRODUCT_DETAILS=product_info,
+            ADDITIONAL_INFO=additional_info_param_for_prompt
+        )
+        st.session_state["updated_prompt"] = updated_prompt_for_claude
+        message_schema_for_claude = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": updated_prompt_for_claude
+                    }
+                ]
+            }
+        ]
+        response_from_claude = create_claude_request_for_text_completion(message_schema_for_claude)
+        campaign_pattern = r"<campaign_idea>(.*?)</campaign_idea>"
+        campaigns = re.findall(campaign_pattern, response_from_claude, re.DOTALL)
+        concat_prompt_list = []
+        for idx, campaign in enumerate(campaigns, start=1):
+            get_model_prompt = extract_data_from_text_xml(campaign, "model_prompt")
+            get_background_prompt = extract_data_from_text_xml(campaign, "background_prompt")
+            concat_prompt_flux = get_model_prompt + get_background_prompt
+            concat_prompt_list.append(concat_prompt_flux)
+        flux_generated_theme_image = []
+        for concat_prompt in concat_prompt_list:
+            theme_image = flux_generated_image(concat_prompt)
+            flux_generated_theme_image.append(theme_image["file_name"])
+        print(flux_generated_theme_image)
+        st.session_state["descriptions"] = concat_prompt_list
+        st.session_state["claude_context"] = response_from_claude
+        st.session_state["images"] = flux_generated_theme_image
    if feedback:
+        updated_context = st.session_state["claude_context"]
        if 'images' in st.session_state and 'descriptions' in st.session_state:
            for image_path in st.session_state['images']:
                os.remove(image_path)
            del st.session_state['images']
            del st.session_state['descriptions']
+            del st.session_state["claude_context"]
+        message_schema_for_claude = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": st.session_state["updated_prompt"]
+                    }
+                ]
+            },
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": updated_context}
+                ]
+            },
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": feedback
+                    }
+                ]
+            },
+
+        ]
+        response_from_claude = create_claude_request_for_text_completion(message_schema_for_claude)
+        campaign_pattern = r"<campaign_idea>(.*?)</campaign_idea>"
+        campaigns = re.findall(campaign_pattern, response_from_claude, re.DOTALL)
+        concat_prompt_list = []
+        for idx, campaign in enumerate(campaigns, start=1):
+            get_model_prompt = extract_data_from_text_xml(campaign, "model_prompt")
+            get_background_prompt = extract_data_from_text_xml(campaign, "background_prompt")
+            concat_prompt_flux = get_model_prompt + get_background_prompt
+            concat_prompt_list.append(concat_prompt_flux)
+        flux_generated_theme_image = []
+        for concat_prompt in concat_prompt_list:
+            theme_image = flux_generated_image(concat_prompt)
+            flux_generated_theme_image.append(theme_image["file_name"])
+        st.session_state["descriptions"] = concat_prompt_list
+        st.session_state["claude_context"] = response_from_claude
+        st.session_state["images"] = flux_generated_theme_image
    selected_image_index = None
+    cols = st.columns(4)
    for i in range(len(st.session_state["images"])):
        with cols[i]:
            st.image(st.session_state.images[i], caption=st.session_state.descriptions[i], use_column_width=True)
 
            st.session_state.page = "Page 2"


def page4():
    import json
+    selected_theme_text_by_user = st.session_state.descriptions[st.session_state.selected_image_index]
    print(selected_theme_text_by_user)
    with (st.sidebar):
        st.title(st.session_state["product_info"])
        st.write("Product Image")
 
    else:
        seed_number = 0
        st.text("Thanks will take care")
+    model__bg_preference = st.text_area("Edit Model & BG Idea", value=selected_theme_text_by_user,
+                                        key="Model & BG Idea")
    start_chat = st.button("Start Chat")
    if "mood_chat_messages" not in st.session_state:
        st.session_state["mood_chat_messages"] = []
+    if seed and dimensions and model__bg_preference:
        if start_chat:
            if seed == "Fixed":
+                generated_flux_image = flux_generated_image_seed(model__bg_preference, seed_number, dimensions)
            else:
+                generated_flux_image = flux_generated_image(model__bg_preference)
            st.session_state["mood_chat_messages"].append({
                "role": "AI",
+                "message": model__bg_preference,
                "image": generated_flux_image["file_name"]
            })
        # for message in st.session_state["mood_chat_messages"]:
+        # if message["role"] == "AI":
+        # st.write(f"Caimera AI: {message['message']}")
+        # st.image(message['image'])
+        #else:
+        # st.write(f"**You**: {message['message']}")
        user_input = st.chat_input("Type your message here...")
        if user_input:
            st.session_state["mood_chat_messages"].append({"role": "User", "message": user_input})
+            updated_flux_prompt = prompts["PROMPT_TO_UPDATE_IDEA_OR_MOOD"].format(
+                EXISTING_MODEL_BG_PROMPT=model__bg_preference,
+                USER_INSTRUCTIONS=user_input
+            )
+            message_schema_for_claude = [
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": updated_flux_prompt
+                        }
+                    ]
+                },
+                {
+                    "role": "assistant",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": str(st.session_state["mood_chat_messages"])}
+                    ]
+                },
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": user_input + "Reference of previous conversation is also added."
+                        }
+                    ]
+                },
+            ]
+            response_from_claude = create_claude_request_for_text_completion(message_schema_for_claude)
+            cleaned_prompt = extract_data_from_text_xml(response_from_claude, "updated_prompt")
            if seed == "Fixed":
+                generated_flux_image_n = flux_generated_image_seed(cleaned_prompt, seed_number,
                                                                    dimensions)
            else:
+                generated_flux_image_n = flux_generated_image(cleaned_prompt)
            st.session_state["mood_chat_messages"].append({
                "role": "AI",
+                "message": cleaned_prompt,
+                "actual_response": response_from_claude,
                "image": generated_flux_image_n["file_name"]
            })
    for message in st.session_state["mood_chat_messages"]:
 

if 'page' not in st.session_state:
+    st.session_state.page = "Page 5"

+if st.session_state.page == "Page 5":
+    page5()
if st.session_state.page == "Page 1":
    page1()
elif st.session_state.page == "Page 2":
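
The rewritten page3 and page4 rely on Claude returning its campaign ideas wrapped in XML-style tags (<campaign_idea>, <model_prompt>, <background_prompt>, <updated_prompt>), which the app pulls apart with re.findall and extract_data_from_text_xml before handing the concatenated prompt to Flux. claude.py is not part of this commit, so the sketch below only assumes a plausible behaviour for extract_data_from_text_xml and runs the same parsing against a toy response string.

    import re

    def extract_data_from_text_xml(text, tag):
        """Assumed behaviour: return the contents of the first <tag>...</tag> block, or '' if absent."""
        match = re.search(rf"<{tag}>(.*?)</{tag}>", text, re.DOTALL)
        return match.group(1).strip() if match else ""

    toy_response = """
    <campaign_idea>
      <model_prompt>Tall model in an ivory linen suit, golden-hour light.</model_prompt>
      <background_prompt>Minimal terracotta courtyard with long shadows.</background_prompt>
    </campaign_idea>
    """

    campaigns = re.findall(r"<campaign_idea>(.*?)</campaign_idea>", toy_response, re.DOTALL)
    for campaign in campaigns:
        flux_prompt = extract_data_from_text_xml(campaign, "model_prompt") + \
                      extract_data_from_text_xml(campaign, "background_prompt")
        print(flux_prompt)  # the concatenated model + background prompt fed to flux_generated_image

Because the two pieces are concatenated directly, any separating space has to come from the prompts themselves, which matches how concat_prompt_flux is built in the diff.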