aniruddh1907 committed on
Commit
4261f10
·
verified ·
1 Parent(s): 18ab634

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -9
app.py CHANGED
@@ -53,10 +53,11 @@ TEXT:
53
  """
54
 
55
  IMAGE_PROMPT_TEMPLATE = """
56
- Based on the following story, craft ONE vivid scene description suitable as a prompt for a text-to-image AI.
57
- Do NOT restate the entire story—write only the visual description.
58
- Include setting, mood, key characters, and distinctive details.
59
- Return a single sentence, nothing else.
 
60
 
61
  Story:
62
  \"\"\"%s\"\"\"
@@ -85,17 +86,20 @@ def extract_entities(text: str):
85
  # ────────────────────────────────
86
  # Build visual prompt
87
  # ────────────────────────────────
88
- def generate_image_prompt(story_text: str):
89
  try:
90
- prompt_msg = IMAGE_PROMPT_TEMPLATE % story_text
91
  resp = together_client.chat.completions.create(
92
  model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
93
  messages=[{"role": "user", "content": prompt_msg}],
94
- max_tokens=120,
95
  )
96
- return resp.choices[0].message.content.strip()
 
 
97
  except Exception as e:
98
- return e
 
99
 
100
 
101
 
 
53
  """
54
 
55
  IMAGE_PROMPT_TEMPLATE = """
56
+ Based on the following story, write %d distinct vivid scene descriptions, one per line.
57
+ Each line should begin with a dash (-) followed by a detailed image-worthy scene.
58
+ Include setting, mood, characters, and visual cues.
59
+
60
+ Return ONLY the list of scenes, each on its own line.
61
 
62
  Story:
63
  \"\"\"%s\"\"\"
 
86
  # ────────────────────────────────
87
  # Build visual prompt
88
  # ────────────────────────────────
89
+ def generate_image_prompts(story_text: str, count=1):
90
  try:
91
+ prompt_msg = IMAGE_PROMPT_TEMPLATE % (count, story_text)
92
  resp = together_client.chat.completions.create(
93
  model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
94
  messages=[{"role": "user", "content": prompt_msg}],
95
+ max_tokens=200,
96
  )
97
+ raw_output = resp.choices[0].message.content.strip()
98
+ prompts = [line.strip("-• ").strip() for line in raw_output.split("\n") if line.strip()]
99
+ return prompts[:count] # just in case LLM gives more than needed
100
  except Exception as e:
101
+ print("⚠️ LLM scene prompt generation failed:", e)
102
+ return []
103
 
104
 
105