import gradio as gr
import spacy  # noqa
from transformers import pipeline

# import os
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# import spacy

# Adjust this threshold to control which misspelled words get corrected
SPELL_CORRECT_MIN_CHAR_DIFF = 2

# Sentinel returned when text cannot be parsed into an integer
TOKENS2INT_ERROR_INT = 32202

ONES = [
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
    "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
    "sixteen", "seventeen", "eighteen", "nineteen",
]

CHAR_MAPPING = {
    "-": " ",
    "_": " ",
    "and": " ",  # note: replace_chars() maps single characters, so this key never matches
}
# CHAR_MAPPING.update((str(i), word) for i, word in enumerate([" " + s + " " for s in ONES]))
TOKEN_MAPPING = {
    "and": " ",
    "oh": "0",
}

def find_char_diff(a, b):
    # Finds the character difference between two strings by counting the
    # occurrences of every character. This is a bag-of-characters distance,
    # not edit distance; characters that appear only in `b` are ignored.
    char_counts_a = {}
    char_counts_b = {}
    for char in a:
        char_counts_a[char] = char_counts_a.get(char, 0) + 1
    for char in b:
        char_counts_b[char] = char_counts_b.get(char, 0) + 1
    char_diff = 0
    for char, count in char_counts_a.items():
        char_diff += abs(count - char_counts_b.get(char, 0))
    return char_diff
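
# Informal examples of find_char_diff, traced by hand against the counting
# rule above (doctest-style comments; not executed at import time):
# >>> find_char_diff("fourty", "forty")   # one extra "u" in the first string
# 1
# >>> find_char_diff("to", "tox")         # characters only in `b` are ignored
# 0
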
def tokenize(text):
    text = text.lower()
    # Map characters (e.g. "-", "_") to spaces, then apply token-level replacements
    text = replace_tokens(''.join(replace_chars(text)).split())
    text = [i for i in text if i != ' ']
    output = []
    for word in text:
        output.append(convert_word_to_int(word))
    # Drop words that convert_word_to_int could not recognize (it returns None)
    output = [i for i in output if i is not None]
    return output
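
# Informal examples, traced by hand through tokenize and its helpers
# (doctest-style comments; not executed at import time):
# >>> tokenize("forty-two")             # "-" is mapped to a space
# [40, 2]
# >>> tokenize("one hundred and five")  # "and" is dropped via TOKEN_MAPPING
# [1, 100, 5]
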
def detokenize(tokens):
    return ' '.join(tokens)


def replace_tokens(tokens, token_mapping=TOKEN_MAPPING):
    return [token_mapping.get(tok, tok) for tok in tokens]


def replace_chars(text, char_mapping=CHAR_MAPPING):
    return [char_mapping.get(c, c) for c in text]

def convert_word_to_int(in_word, numwords={}):
    # Converts a single word (str) into a single int; returns None if the
    # word is not recognized even after spell correction.
    # The mutable default `numwords` is used as a cache across calls.
    tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
    scales = ["hundred", "thousand", "million", "billion", "trillion"]
    if not numwords:
        for idx, word in enumerate(ONES):
            numwords[word] = idx
        for idx, word in enumerate(tens):
            numwords[word] = idx * 10
        for idx, word in enumerate(scales):
            numwords[word] = 10 ** (idx * 3 or 2)
    if in_word in numwords:
        return numwords[in_word]
    try:
        return int(in_word)
    except ValueError:
        pass
    # Spell correction using find_char_diff: accept the closest known number
    # word if it is within SPELL_CORRECT_MIN_CHAR_DIFF characters
    candidates = ONES + tens + scales
    char_diffs = [find_char_diff(in_word, word) for word in candidates]
    min_char_diff = min(char_diffs)
    if min_char_diff <= SPELL_CORRECT_MIN_CHAR_DIFF:
        return numwords[candidates[char_diffs.index(min_char_diff)]]
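
# Examples of the spell-correction fallback, traced by hand with
# SPELL_CORRECT_MIN_CHAR_DIFF = 2 (doctest-style comments only):
# >>> convert_word_to_int("fourty")   # one character away from "forty"
# 40
# >>> convert_word_to_int("xyzzy")    # nothing within 2 characters -> None
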
def tokens2int(tokens):
    # Takes a list of int tokens and combines them into an int representation
    # Classify each token: 1 = ones (<= 9), 2 = tens (<= 90), 3 = larger scales
    types = []
    for i in tokens:
        if i <= 9:
            types.append(1)
        elif i <= 90:
            types.append(2)
        else:
            types.append(3)
    if len(tokens) <= 3:
        current = 0
        for i, number in enumerate(tokens):
            if i != 0 and types[i] < types[i - 1] and current != tokens[i - 1] and types[i - 1] != 3:
                current += tokens[i] + tokens[i - 1]
            elif current <= tokens[i] and current != 0:
                current *= tokens[i]
            elif 3 not in types and 1 not in types:
                current = int(''.join(str(i) for i in tokens))
                break
            elif '111' in ''.join(str(i) for i in types) and 2 not in types and 3 not in types:
                current = int(''.join(str(i) for i in tokens))
                break
            else:
                current += number
    elif 3 not in types and 2 not in types:
        # All single digits, e.g. "one two three" -> 123
        current = int(''.join(str(i) for i in tokens))
    else:
"""
double_list = []
current_double = []
double_type_list = []
for i in tokens:
if len(current_double) < 2:
current_double.append(i)
else:
double_list.append(current_double)
current_double = []
current_double = []
for i in types:
if len(current_double) < 2:
current_double.append(i)
else:
double_type_list.append(current_double)
current_double = []
print(double_type_list)
print(double_list)
current = 0
for i, type_double in enumerate(double_type_list):
if len(type_double) == 1:
current += double_list[i][0]
elif type_double[0] == type_double[1]:
current += int(str(double_list[i][0]) + str(double_list[i][1]))
elif type_double[0] > type_double[1]:
current += sum(double_list[i])
elif type_double[0] < type_double[1]:
current += double_list[i][0] * double_list[i][1]
#print(current)
"""
        # Fold tokens pairwise: equal types concatenate digits, a descending
        # type pair adds (40, 5 -> 45), an ascending pair multiplies (1, 100 -> 100)
        count = 0
        current = 0
        for i, token in enumerate(tokens):
            count += 1
            if count == 2:
                if types[i - 1] == types[i]:
                    current += int(str(token) + str(tokens[i - 1]))
                elif types[i - 1] > types[i]:
                    current += tokens[i - 1] + token
                else:
                    current += tokens[i - 1] * token
                count = 0
            elif i == len(tokens) - 1:
                current += token
    return current
def text2int(text):
    # Wraps tokenization and combination into a single call
    try:
        return tokens2int(tokenize(text))
    except Exception:
        # Assumption: the sentinel constant is meant for exactly this case
        return TOKENS2INT_ERROR_INT
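
# End-to-end examples, traced by hand through the functions above
# (doctest-style comments; not executed at import time):
# >>> text2int("forty two")
# 42
# >>> text2int("one thousand forty seven")
# 1047
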
sentiment = pipeline(task="sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")


def get_sentiment(text):
    return sentiment(text)
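
# The Hugging Face pipeline returns one dict per input; the exact score will
# vary with the model version (illustrative output, not captured from a run):
# >>> get_sentiment("I really like it!")
# [{'label': 'POSITIVE', 'score': 0.99...}]
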
with gr.Blocks() as html_block:
    gr.Markdown("# Rori - Mathbot")

    with gr.Tab("Text to integer"):
        inputs_text2int = [gr.Text(
            placeholder="Type a number as text or a sentence",
            label="Text to process",
            value="forty two")]
        outputs_text2int = gr.Textbox(label="Output integer")
        button_text2int = gr.Button("text2int")

        button_text2int.click(
            fn=text2int,
            inputs=inputs_text2int,
            outputs=outputs_text2int,
            api_name="text2int",
        )

        examples_text2int = [
            "one thousand forty seven",
            "one hundred",
        ]
        gr.Examples(examples=examples_text2int, inputs=inputs_text2int)

        gr.Markdown(r"""
## API

```python
import requests

requests.post(
    url="https://tangibleai-mathtext.hf.space/run/text2int", json={"data": ["one hundred forty five"]}
).json()
```

Or using `curl`:

```bash
curl -X POST https://tangibleai-mathtext.hf.space/run/text2int -H 'Content-Type: application/json' -d '{"data": ["one hundred forty five"]}'
```
""")
with gr.Tab("Sentiment Analysis"):
inputs_sentiment = [
gr.Text(placeholder="Type a number as text or a sentence", label="Text to process",
value="I really like it!"),
]
outputs_sentiment = gr.Textbox(label="Sentiment result")
button_sentiment = gr.Button("sentiment analysis")
button_sentiment.click(
get_sentiment,
inputs=inputs_sentiment,
outputs=outputs_sentiment,
api_name="sentiment-analysis"
)
examples_sentiment = [
["Totally agree!"],
["Sorry, I can not accept this!"],
]
gr.Examples(examples=examples_sentiment, inputs=inputs_sentiment)
gr.Markdown(r"""
## API
```python
import requests
requests.post(
url="https://tangibleai-mathtext.hf.space/run/sentiment-analysis", json={"data": ["You are right!"]}
).json()
```
Or using `curl`:
```bash
curl -X POST https://tangibleai-mathtext.hf.space/run/sentiment-analysis -H 'Content-Type: application/json' -d '{"data": ["You are right!"]}'
```
""")
# interface = gr.Interface(lambda x: x, inputs=["text"], outputs=["text"])
# html_block.input_components = interface.input_components
# html_block.output_components = interface.output_components
# html_block.examples = None
html_block.predict_durations = []
html_block.launch()