awacke1 committed on
Commit a03361e · 1 Parent(s): c64a874

Update app.py

Files changed (1): app.py +92 -9
app.py CHANGED
@@ -3,12 +3,96 @@ import requests
 import os
 
 ##Bloom Inference API
-API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
-HF_TOKEN = os.environ["HF_TOKEN"]
+
+API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"  # Models on HF expose an Inference API that allows direct calls through an easy interface
+
+HF_TOKEN = os.environ["HF_TOKEN"]  # Create a token named HF_TOKEN under your profile's Settings > Access Tokens, then copy it into a repository secret in this Space's settings panel; os.environ reads it from there.
+
+# For the headers, the bearer token needs to include your HF_TOKEN value.
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
+# Improved text generation function
+def text_generate(prompt, generated_txt):
+    # Initialize Thoughts variable to aggregate text
+    Thoughts = ""
+
+    # Debug: display the prompt
+    Thoughts += f"*****Inside text_generate - Prompt is: {prompt}\n"
+
+    json_ = {
+        "inputs": prompt,
+        "parameters": {
+            "top_p": 0.9,
+            "temperature": 1.1,
+            "return_full_text": True,
+            "do_sample": True,
+        },
+        "options": {
+            "use_cache": True,
+            "wait_for_model": True,
+        },
+    }
+    response = requests.post(API_URL, headers=headers, json=json_)
+
+    # Debug: display the response
+    Thoughts += f"Response is: {response}\n"
+    output = response.json()
+
+    # Debug: display the output
+    Thoughts += f"output is: {output}\n"
+    output_tmp = output[0]['generated_text']
+
+    # Debug: display the output_tmp
+    Thoughts += f"output_tmp is: {output_tmp}\n"
+    solution = output_tmp.split("\nQ:")[0]
+
+    # Debug: display the solution after splitting
+    Thoughts += f"Final response after splits is: {solution}\n"
+
+    if '\nOutput:' in solution:
+        final_solution = solution.split("\nOutput:")[0]
+        Thoughts += f"Response after removing output is: {final_solution}\n"
+    elif '\n\n' in solution:
+        final_solution = solution.split("\n\n")[0]
+        Thoughts += f"Response after removing new line entries is: {final_solution}\n"
+    else:
+        final_solution = solution
+
+    if len(generated_txt) == 0:
+        display_output = final_solution
+    else:
+        display_output = generated_txt[:-len(prompt)] + final_solution
+
+    new_prompt = final_solution[len(prompt):]
+
+    # Debug: display the new prompt for the next cycle
+    Thoughts += f"new prompt for next cycle is: {new_prompt}\n"
+    Thoughts += f"display_output for printing on screen is: {display_output}\n"
+
+    if len(new_prompt) == 0:
+        temp_text = display_output[::-1]
+        Thoughts += f"What is the last character of the sentence?: {temp_text[0]}\n"
+
+        if temp_text[1] == '.':
+            first_period_loc = temp_text[2:].find('.') + 1
+            Thoughts += f"Location of last Period is: {first_period_loc}\n"
+            new_prompt = display_output[-first_period_loc:-1]
+            Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
+        else:
+            first_period_loc = temp_text.find('.')
+            Thoughts += f"Location of last Period is: {first_period_loc}\n"
+            new_prompt = display_output[-first_period_loc:-1]
+            Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
+
+        display_output = display_output[:-1]
+
+    return display_output, new_prompt, Thoughts
+
+
+# Text generation
-def text_generate(prompt, generated_txt):
+def text_generate_old(prompt, generated_txt):
     #Prints to debug the code
     print(f"*****Inside text_generate - Prompt is :{prompt}")
     json_ = {"inputs": prompt,
@@ -71,18 +155,17 @@ def text_generate(prompt, generated_txt):
 demo = gr.Blocks()
 
 with demo:
-    gr.Markdown("<h1><center>Write Stories Using Bloom</center></h1>")
-    gr.Markdown(
-        """Bloom is a model by [HuggingFace](https://huggingface.co/bigscience/bloom) and a team of more than 1000 researchers coming together as [BigScienceW Bloom](https://twitter.com/BigscienceW).\n\nLarge language models have demonstrated a capability of producing coherent sentences and given a context we can pretty much decide the *theme* of generated text.\n\nHow to Use this App: Use the sample text given as prompt or type in a new prompt as a starting point of your awesome story! Just keep pressing the 'Generate Text' Button and go crazy!\n\nHow this App works: This app operates by feeding back the text generated by Bloom to itself as a Prompt for next generation round and so on. Currently, due to size-limits on Prompt and Token generation, we are only able to feed very limited-length text as Prompt and are getting very few tokens generated in-turn. This makes it difficult to keep a tab on theme of text generation, so please bear with that. In summary, I believe it is a nice little fun App which you can play with for a while.\n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) for EuroPython 2022 Demo."""
-    )
     with gr.Row():
         input_prompt = gr.Textbox(label="Write some text to get started...", lines=3, value="Dear human philosophers, I read your comments on my abilities and limitations with great interest.")
 
     with gr.Row():
-        generated_txt = gr.Textbox(lines=7, visible=True)
+        generated_txt = gr.Textbox(lines=4, visible=True)
+
+    with gr.Row():
+        Thoughts = gr.Textbox(lines=4, visible=True)
 
     b1 = gr.Button("Generate Your Story")
 
-    b1.click(text_generate, inputs=[input_prompt, generated_txt], outputs=[generated_txt, input_prompt])
+    b1.click(text_generate, inputs=[input_prompt, generated_txt], outputs=[generated_txt, input_prompt, Thoughts])
 
 demo.launch(enable_queue=True, debug=True)
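A note on the Inference API pattern this commit builds on: the payload shape ("inputs" plus "parameters" and "options") is exactly what the diff sends. Below is a minimal sketch for exercising the same endpoint outside the Space, assuming HF_TOKEN is set in the environment; the query_bloom name, the payload variable, and the raise_for_status() check are illustrative additions, not part of app.py.

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}

def query_bloom(prompt: str) -> str:
    # Same payload shape as text_generate above
    payload = {
        "inputs": prompt,
        "parameters": {"top_p": 0.9, "temperature": 1.1,
                       "return_full_text": True, "do_sample": True},
        "options": {"use_cache": True, "wait_for_model": True},
    }
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()  # fail loudly on HTTP errors instead of inside .json()
    return response.json()[0]["generated_text"]  # the API returns a list of generations

print(query_bloom("Dear human philosophers, "))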
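The blank-prompt guard at the end of text_generate is the subtlest part of the new function: when the model returns nothing beyond the prompt, it walks the reversed display text to find a period and re-seeds the next cycle from there instead of sending an empty prompt. A small worked example of that indexing on a hypothetical string, chosen so the trailing '..' takes the first branch:

# Reproduce the blank-prompt guard from text_generate on a fixed string.
display_output = "It began at sea. The end.."
temp_text = display_output[::-1]                 # "..dne ehT .aes ta nageb tI"
assert temp_text[1] == '.'                       # double period -> first branch
first_period_loc = temp_text[2:].find('.') + 1   # 9 characters back to the previous period
new_prompt = display_output[-first_period_loc:-1]
print(new_prompt)                                # -> "The end."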
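Why outputs=[generated_txt, input_prompt, Thoughts] matters: text_generate returns (display_output, new_prompt, Thoughts), so each click writes the running story into generated_txt and feeds new_prompt back into the input box, which is the feedback loop the removed gr.Markdown blurb described. The same loop can be driven without the UI; a sketch, assuming the text_generate defined above and a valid HF_TOKEN (the story/prompt variables and the three-round count are arbitrary illustrations):

# Drive the feedback loop manually: each round's new_prompt seeds the next round.
story = ""
prompt = "Dear human philosophers, I read your comments on my abilities and limitations with great interest."
for round_num in range(3):  # three rounds, mirroring three clicks of "Generate Your Story"
    story, prompt, thoughts = text_generate(prompt, story)
    print(f"--- round {round_num} ---\n{thoughts}")
print(story)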