Spaces:
Build error
Update app.py
app.py CHANGED
@@ -1,42 +1,55 @@
 import tensorflow as tf
 from tensorflow import keras
+from tensorflow.keras.preprocessing.text import Tokenizer
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+
 import gradio as gr
 from gradio import mix
 import numpy as np
 import torch
 from keras.utils.data_utils import pad_sequences
 
 from huggingface_hub import from_pretrained_keras
 
 model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")
 
-[old lines 14-27 removed; their content is not shown in this view]
-def generate_answers(
-[old lines 29-35 removed; their content is not shown in this view]
+def text_process_pipeline(text):  # tokenize and pad the prompt to the model's input length
+    tokenizer = Tokenizer(num_words=80, split=' ')
+    tokenizer.fit_on_texts(word_to_index.values())
+    processed_text = tokenizer.texts_to_sequences([text])
+    processed_text = pad_sequences(processed_text, maxlen=80, padding='post')
+    return processed_text
+
+def sample_from(logits):
+    # Top-k sampling: softmax over the 10 highest logits, then draw one token id.
+    l, i = tf.math.top_k(logits, k=10, sorted=True)
+    indices = np.asarray(i).astype("int32")
+    preds = keras.activations.softmax(tf.expand_dims(l, 0))[0]
+    preds = np.asarray(preds).astype("float32")
+    return np.random.choice(indices, p=preds)
+
+def generate_answers(start_prompt):
+    num_tokens_generated = 0
+    sample_index = len(start_prompt) - 1
+    start_tokens = [word_to_index.get(_, 1) for _ in start_prompt]
+    tokens_generated = []
+
+    text_out = text_process_pipeline(start_prompt)
+    predictions, _ = model.predict(text_out)
+    results = np.argmax(predictions, axis=1)[0]
+
+    while num_tokens_generated <= 40:
+        sample_token = sample_from(predictions[0][sample_index])
+        tokens_generated.append(sample_token)
+        start_tokens.append(sample_token)
+        num_tokens_generated = len(tokens_generated)
+
+    text_out = tokenizer.sequences_to_texts([tokens_generated])
+    return text_out[0]
 
 examples = [["The movie was nice, "], ["It was showing nothing special to "]]
 title = "Text Generation with Miniature GPT"
 description = "Gradio Demo for text generation with a miniature GPT. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
 
 iface = gr.Interface(fn=generate_answers, title=title, description=description, inputs=['text'], outputs=["text"], examples=examples)
-iface.launch()
+iface.launch(debug=True)
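
Even after this update, app.py still references word_to_index, which is never defined anywhere in the file, so the Space is likely to fail at request time once it builds. A minimal sketch of the missing piece, assuming the Space ships the vocabulary the model was trained with as a JSON word list; the file name vocab.json and both mapping names are illustrative, not part of this commit:

import json

# Assumption: vocab.json holds the training vocabulary as a list of words,
# ordered so that a word's position in the list is its token id.
with open("vocab.json") as f:
    vocab = json.load(f)

word_to_index = {word: idx for idx, word in enumerate(vocab)}
index_to_word = {idx: word for word, idx in word_to_index.items()}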
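
As committed, generate_answers also draws every new token from the single predictions array computed for the initial prompt, and sequences_to_texts relies on a tokenizer that only exists inside text_process_pipeline. The generation loop in the keras.io miniature GPT example instead re-runs the model after each sampled token. A hedged sketch in that style, reusing the app's own sample_from and assuming the word_to_index / index_to_word mapping sketched above, a fixed context length of 80, and a model that returns (logits, attention) as the app's own unpacking implies:

def generate_answers(start_prompt, max_tokens=40, maxlen=80):
    # Word-level tokenization of the prompt; unknown words map to id 1.
    start_tokens = [word_to_index.get(w, 1) for w in start_prompt.split()]
    tokens_generated = []
    while len(tokens_generated) < max_tokens:
        # Pad (or truncate) to the model's fixed input length.
        pad_len = maxlen - len(start_tokens)
        if pad_len < 0:
            x = start_tokens[-maxlen:]
            sample_index = maxlen - 1
        else:
            x = start_tokens + [0] * pad_len
            sample_index = len(start_tokens) - 1
        # Re-run the model each step so every new token conditions on the previous one.
        logits, _ = model.predict(np.array([x]), verbose=0)
        sample_token = int(sample_from(logits[0][sample_index]))
        tokens_generated.append(sample_token)
        start_tokens.append(sample_token)
    return " ".join(index_to_word.get(t, "[UNK]") for t in start_tokens)

Because the extra parameters have defaults, the gr.Interface call can stay exactly as committed.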