Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")  # CPU-friendly for Spaces
 
 # Text generation function
-def generate_text(prompt, max_length=
+def generate_text(prompt, max_length=500, temperature=0.7, top_p=0.9):
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(
         **inputs,
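The hunk cuts off inside the model.generate call, so only the new signature is visible. A minimal sketch of how those parameters are presumably threaded through to generation; the do_sample flag, the pad_token_id choice, and the decode step are assumptions, not part of this diff.

```python
# Hypothetical completion of generate_text implied by the new signature.
def generate_text(prompt, max_length=500, temperature=0.7, top_p=0.9):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=max_length,    # cap on prompt + generated tokens
        temperature=temperature,  # sampling temperature
        top_p=top_p,              # nucleus sampling threshold
        do_sample=True,           # assumed: temperature/top_p only apply when sampling
        pad_token_id=tokenizer.eos_token_id,  # assumed: silences the missing-pad-token warning
    )
    # Assumed: return the decoded text (includes the prompt for a causal LM).
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```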
@@ -28,8 +28,8 @@ global_chapters = ""
 # Generation functions
 def generate_synopsis(topic):
     global global_synopsis
-    prompt = f"Write a brief synopsis for a story about {topic}: "
-    global_synopsis = generate_text(prompt, max_length=
+    prompt = f"Write a brief synopsis for a story about {topic}: . The max number of characters must be 500"
+    global_synopsis = generate_text(prompt, max_length=500)
     return global_synopsis
 
 def generate_chapters():
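Note that max_length in generate_text counts tokens, while the new prompt asks the model for 500 characters, which it may or may not respect. A hedged usage sketch of enforcing the budget after generation; the topic string and the slicing are illustrative, not part of the app.

```python
# Illustrative only: a hard post-hoc character cap, since neither the prompt
# instruction nor max_length (measured in tokens) guarantees 500 characters.
synopsis = generate_synopsis("a lighthouse keeper who befriends a storm")
synopsis = synopsis[:500]  # hypothetical hard cap in characters
print(synopsis)
```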
@@ -37,7 +37,8 @@ def generate_chapters():
     if not global_synopsis:
         return "Please generate a synopsis first."
     prompt = f'''Based on this synopsis for a book: {global_synopsis}. Divide the story into 4 chapters with brief descriptions for each.
-    Enumerate every chapter created followed by its description and make the first chapter sound like an introduction and the last may sound as the epilogue
+    Enumerate every chapter created followed by its description and make the first chapter sound like an introduction and the last may sound as the epilogue.
+    The max number of character for every pair title, description must be 500'''
     global_chapters = generate_text(prompt, max_length=2000)
     return global_chapters
 
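The `chapters` list indexed by expand_chapter in the next hunk is built from global_chapters somewhere between these hunks; that code is not shown in this diff. A hypothetical splitting helper, assuming the model follows the "Chapter N: description" layout the prompt asks for.

```python
import re

# Hypothetical helper: one list entry per generated chapter.
# The "Chapter <number>" delimiter is an assumption based on the prompt wording.
def split_chapters(text):
    parts = re.split(r"(?=Chapter\s+\d+)", text)
    return [p.strip() for p in parts if p.strip().startswith("Chapter")]
```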
@@ -50,7 +51,7 @@ def expand_chapter(chapter_number):
         return f"Select a number between 1 and {len(chapters)}."
     prompt = f'''Knowing this synopsis for a book: {global_synopsis}. Expand and describe Chapter {chapter_number}
     in more detail, the title and current brief description of this chapter is: {chapters[chapter_number - 1]}'''
-    return generate_text(prompt, max_length=
+    return generate_text(prompt, max_length=500)
 
 # Gradio interface
 with gr.Blocks() as demo:
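The diff ends at the context line `with gr.Blocks() as demo:`, so the interface body is not shown. A hedged sketch of one way the three functions could be wired into Blocks; every component name and label below is an assumption, only the function signatures come from the diff.

```python
# Hypothetical Gradio wiring for the three generation functions.
with gr.Blocks() as demo:
    topic = gr.Textbox(label="Story topic")
    synopsis_btn = gr.Button("Generate synopsis")
    synopsis_out = gr.Textbox(label="Synopsis")

    chapters_btn = gr.Button("Generate chapters")
    chapters_out = gr.Textbox(label="Chapters")

    chapter_num = gr.Number(label="Chapter to expand", precision=0)
    expand_btn = gr.Button("Expand chapter")
    chapter_out = gr.Textbox(label="Expanded chapter")

    synopsis_btn.click(generate_synopsis, inputs=topic, outputs=synopsis_out)
    chapters_btn.click(generate_chapters, inputs=None, outputs=chapters_out)
    expand_btn.click(expand_chapter, inputs=chapter_num, outputs=chapter_out)

demo.launch()
```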