updated app.py
app.py CHANGED
@@ -19,6 +19,7 @@ class StoryRequest(BaseModel):
     mood: str
     story_type: str
     theme: str
+    length: int
     num_scenes: int
     txt: str
 
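With the new length field, the request model reads as below. A sketch for reference; the example payload is illustrative and not part of the commit:

from pydantic import BaseModel

class StoryRequest(BaseModel):
    mood: str
    story_type: str
    theme: str
    length: int       # added in this commit
    num_scenes: int
    txt: str

# Illustrative body for POST /generate_story/ (values are assumptions):
# {"mood": "cheerful", "story_type": "adventure", "theme": "friendship",
#  "length": 7, "num_scenes": 7, "txt": "A fox and a rabbit explore a forest."}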
@@ -64,11 +65,7 @@ Total pages in storie are seven each page have one short paragraph and dont ask
 to create another adventure soon!
 """
 
-
-
-@app.get("/")
-def read_root():
-    return {"message": "Welcome to the Story Generation API!"}
+prompt_template = ChatPromptTemplate.from_messages([("system", system), ("human", "{text}")])
 
 # FastAPI endpoint to generate the story
 @app.post("/generate_story/")
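The template now lives at module level, built once at import time, with "{text}" as a per-request placeholder. A minimal self-contained sketch of the pattern, assuming the langchain_core import path (the real imports and system prompt sit earlier in app.py, outside this diff):

from langchain_core.prompts import ChatPromptTemplate

system = "You are a storyteller for children."  # stand-in; the real system prompt is defined above in app.py

# Built once at import time; "{text}" is a template variable filled per request.
prompt_template = ChatPromptTemplate.from_messages([("system", system), ("human", "{text}")])

Note that the same hunk drops the read_root handler, so GET / no longer resolves and only the POST route remains.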
@@ -79,9 +76,8 @@ async def generate_story(story_request: StoryRequest):
     - **Theme:** {story_request.theme}
     - **Details Provided:** {story_request.txt}
     """
-    prompt_template = ChatPromptTemplate.from_messages([("system", system), ("human", story)])
 
-
+    final_prompt = prompt_template.format(text=story)
 
     # Create the LLMChain
     # chain = LLMChain(llm=llm, prompt=prompt_template)
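One detail worth flagging: ChatPromptTemplate.format(...) flattens the prompt into a single string rather than a list of chat messages. If chain is built as an LCEL pipeline (its definition is not visible in this diff), the more idiomatic call passes the template variables as a dict and lets the chain render the prompt. A sketch under that assumption:

# Assumption: chain is an LCEL pipeline defined elsewhere in app.py, e.g.
# chain = prompt_template | llm
response = chain.invoke({"text": story})  # the chain renders {text} itself

# The committed code instead pre-renders the prompt to one string:
final_prompt = prompt_template.format(text=story)  # "System: ...\nHuman: ..."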
@@ -92,10 +88,7 @@ async def generate_story(story_request: StoryRequest):
     # return {"story": response}
     # except Exception as e:
     # raise HTTPException(status_code=500, detail=str(e))
-
-    # async for s in chain.astream(final_prompt):
-    # print(s.content, end="", flush=True)
-    response = await chain.ainvoke()
+    response = chain.invoke(final_prompt)
 
     if not response:
         raise HTTPException(status_code=500, detail="Failed to generate the story")
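The removed await chain.ainvoke() passed no input, which would fail at call time, so the new call fixes a real bug. Since generate_story is an async def endpoint, though, the synchronous chain.invoke(...) blocks the event loop for the duration of the model call. Standard LangChain runnables expose an async variant with the same input shape; a sketch, assuming chain follows that interface:

# Non-blocking alternative inside the async endpoint (assumes chain is a
# LangChain Runnable, which provides ainvoke alongside invoke):
response = await chain.ainvoke(final_prompt)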
@@ -125,4 +118,14 @@ async def generate_story(story_request: StoryRequest):
     return {
         "story": response,
         "images": images
-    }
+    }
+
+
+
+    # image_prompt = (
+    #     f"Generate an image for Scene {i+1}. "
+    #     f"This image should represent the details described in paragraph {i+1} of the story. "
+    #     f"Mood: {mood}, Story Type: {', '.join(story_type)}, Theme: {theme}. "
+    #     f"Story: {response} "
+    #     f"Focus on the key elements in paragraph {i+1}."
+    # )
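The commented-out draft added at the end suggests a per-scene image prompt built in a loop. A hypothetical reconstruction of how it might be wired up once re-enabled; the loop and the variable sources are assumptions, and note the draft joins story_type with ', '.join(...) as if it were a list, while the model declares it as str:

image_prompts = []
for i in range(story_request.num_scenes):
    image_prompts.append(
        f"Generate an image for Scene {i+1}. "
        f"This image should represent the details described in paragraph {i+1} of the story. "
        f"Mood: {story_request.mood}, Story Type: {story_request.story_type}, "
        f"Theme: {story_request.theme}. "
        f"Story: {response} "
        f"Focus on the key elements in paragraph {i+1}."
    )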