LuisMBA committed on
Commit
fe1ee5b
·
verified ·
1 Parent(s): 8bf4c29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -27
app.py CHANGED
@@ -3,23 +3,28 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
  # Model setup
6
- #model_name = "EleutherAI/gpt-neo-125M" # Lightweight model for Spaces
7
- model_name = "EleutherAI/gpt-neo-1.3B" # A bit better model
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
  model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu") # CPU-friendly for Spaces
10
 
11
  # Text generation function
12
  def generate_text(prompt, max_length=500, temperature=0.7, top_p=0.9):
13
- inputs = tokenizer(prompt, return_tensors="pt")
14
- outputs = model.generate(
15
- **inputs,
16
- max_length=max_length,
17
- temperature=temperature,
18
- top_p=top_p,
19
- do_sample=True,
20
- pad_token_id=tokenizer.eos_token_id
21
- )
22
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
23
 
24
  # Global variables to store hierarchical content
25
  global_synopsis = ""
@@ -28,30 +33,39 @@ global_chapters = ""
28
  # Generation functions
29
  def generate_synopsis(topic):
30
  global global_synopsis
31
- prompt = f"Write a brief synopsis for a story about {topic}: . The max number of characters must be 500"
32
- global_synopsis = generate_text(prompt, max_length=500)
33
- return global_synopsis
 
 
 
34
 
35
  def generate_chapters():
36
  global global_synopsis, global_chapters
37
  if not global_synopsis:
38
  return "Please generate a synopsis first."
39
- prompt = f'''Based on this synopsis for a book: {global_synopsis}. Divide the story into 4 chapters with brief descriptions for each.
40
- Enumerate every chapter created followed by its description and make the first chapter sound like an introduction and the last may sound as the epilogue.
41
- The max number of character for every pair title, description must be 500'''
42
- global_chapters = generate_text(prompt, max_length=2000)
43
- return global_chapters
 
 
 
44
 
45
  def expand_chapter(chapter_number):
46
  global global_chapters
47
  if not global_chapters:
48
  return "Please generate chapters first."
49
- chapters = global_chapters.split("\n")
50
- if chapter_number <= 0 or chapter_number > len(chapters):
51
- return f"Select a number between 1 and {len(chapters)}."
52
- prompt = f'''Knowing this synopsis for a book: {global_synopsis}. Expand and describe Chapter {chapter_number}
53
- in more detail, the title and current brief description of this chapter is: {chapters[chapter_number - 1]}'''
54
- return generate_text(prompt, max_length=500)
 
 
 
55
 
56
  # Gradio interface
57
  with gr.Blocks() as demo:
@@ -77,4 +91,4 @@ with gr.Blocks() as demo:
77
  chapter_button.click(expand_chapter, inputs=chapter_input, outputs=chapter_detail_output)
78
 
79
  # Launch the app
80
- demo.launch()
 
import torch

# Model setup.
# GPT-Neo 1.3B: upgraded from the 125M variant (see diff), still small enough
# to load on the free CPU hardware of a Hugging Face Space.
model_name = "EleutherAI/gpt-neo-1.3B"

# Shared tokenizer/model instances used by every generation function below.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")  # CPU-friendly for Spaces
9
 
10
# Text generation function
def generate_text(prompt, max_length=500, temperature=0.7, top_p=0.9):
    """Generate a text continuation for *prompt* with the causal LM.

    Args:
        prompt: Input text fed to the model.
        max_length: Maximum number of NEW tokens to generate.
        temperature: Sampling temperature (higher = more random).
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded generated text (without the echoed prompt), or an
        error-message string if generation fails.
    """
    try:
        # Reserve room for the output inside GPT-Neo's 2048-token context.
        max_input_length = 2048 - max_length
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True,
                           max_length=max_input_length)

        # Inference only — no autograd bookkeeping needed.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                # BUGFIX: the previous max_length=... counted prompt + output
                # tokens, so after truncating the input to 2048 - max_length
                # a long prompt left almost no room for generation.
                # max_new_tokens counts only generated tokens.
                max_new_tokens=max_length,
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )
        # Strip the echoed prompt: decoding the whole sequence fed the
        # instruction text back into global_synopsis and later prompts.
        prompt_len = inputs["input_ids"].shape[1]
        return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    except Exception as e:
        # Surface the failure in the Gradio UI instead of crashing the app.
        return f"Error during generation: {str(e)}"
28
 
29
  # Global variables to store hierarchical content
30
  global_synopsis = ""
 
33
# Generation functions
def generate_synopsis(topic):
    """Generate a brief story synopsis about *topic* and cache it globally.

    Returns the synopsis text, or an error-message string on failure.
    """
    global global_synopsis
    try:
        request = (
            f"Write a brief synopsis for a story about {topic}. "
            "The max number of characters must be 500."
        )
        global_synopsis = generate_text(request, max_length=500)
        return global_synopsis
    except Exception as err:
        return f"Error generating synopsis: {str(err)}"
42
 
43
def generate_chapters():
    """Divide the cached synopsis into 4 described chapters; cache and return them.

    Requires generate_synopsis() to have run first (reads global_synopsis);
    stores the result in global_chapters. Returns the chapter text, a
    user-facing hint when no synopsis exists, or an error-message string.
    """
    global global_synopsis, global_chapters
    if not global_synopsis:
        return "Please generate a synopsis first."
    try:
        # BUGFIX: the previous triple-quoted literal embedded the source
        # file's indentation and hard line breaks in the text sent to the
        # model; build one clean single-spaced prompt instead.
        prompt = (
            f"Based on this synopsis for a book: {global_synopsis}. "
            "Divide the story into 4 chapters with brief descriptions for each. "
            "Enumerate every chapter created followed by its description and "
            "make the first chapter sound like an introduction and the last "
            "may sound as the epilogue. The max number of characters for "
            "every pair title, description must be 500."
        )
        global_chapters = generate_text(prompt, max_length=2000)
        return global_chapters
    except Exception as e:
        return f"Error generating chapters: {str(e)}"
55
 
56
def expand_chapter(chapter_number):
    """Expand chapter *chapter_number* (1-based) of the cached chapter list.

    Reads global_chapters/global_synopsis. Returns the expanded chapter text,
    a user-facing hint when prerequisites are missing or the number is out of
    range, or an error-message string on failure.
    """
    global global_chapters
    if not global_chapters:
        return "Please generate chapters first."
    try:
        # BUGFIX: splitting on "\n" alone counted blank lines as chapters,
        # inflating the advertised range and mis-indexing the selection.
        chapters = [line for line in global_chapters.split("\n") if line.strip()]
        # BUGFIX: Gradio Number inputs arrive as float; indexing with a float
        # raised TypeError (swallowed into the generic error message).
        chapter_number = int(chapter_number)
        if chapter_number <= 0 or chapter_number > len(chapters):
            return f"Select a number between 1 and {len(chapters)}."
        # Single clean prompt — the old triple-quoted literal leaked source
        # indentation/newlines into the model input.
        prompt = (
            f"Knowing this synopsis for a book: {global_synopsis}. "
            f"Expand and describe Chapter {chapter_number} in more detail, "
            f"the title and current brief description of this chapter is: "
            f"{chapters[chapter_number - 1]}"
        )
        return generate_text(prompt, max_length=500)
    except Exception as e:
        return f"Error expanding chapter: {str(e)}"
69
 
70
  # Gradio interface
71
  with gr.Blocks() as demo:
 
91
  chapter_button.click(expand_chapter, inputs=chapter_input, outputs=chapter_detail_output)
92
 
93
  # Launch the app
94
+ demo.launch()