import inspect
import json
import logging
import os
import gradio as gr
from gradio import routes
import spacy  # noqa
from typing import List, Type
from dotenv import load_dotenv
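
# Gradio Blocks app that converts English number words into integers and
# exposes the conversion functions as REST endpoints.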

load_dotenv()

TOKENS2INT_ERROR_INT = 32202  # sentinel value returned when a string cannot be parsed

log = logging.getLogger()

ONES = [
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
    "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
    "sixteen", "seventeen", "eighteen", "nineteen",
]

# token_mapping = json.load(open('str_mapping.json'))
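# Character-level replacements applied before tokenizing: hyphens and underscores
# become spaces, and digit characters expand to padded number words.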
CHAR_MAPPING = {
    "-": " ",
    "_": " ",
}
CHAR_MAPPING.update((str(i), word) for i, word in enumerate([" " + s + " " for s in ONES]))

# Keys must be strings so digit tokens like "3" map to their word form.
TOKEN_MAPPING = {str(i): word for i, word in enumerate(ONES)}

BQ_JSON = os.environ['BQ_JSON']  # JSON string loaded from the environment (.env); only its 'type' field is displayed below


def tokenize(text):
    return text.split()


def detokenize(tokens):
    return ' '.join(tokens)


def replace_tokens(tokens, token_mapping=TOKEN_MAPPING):
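    # e.g. ["1", "2", "three"] -> ["one", "two", "three"]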
    return [token_mapping.get(tok, tok) for tok in tokens]


def replace_chars(text, char_mapping=CHAR_MAPPING):
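    # e.g. "forty-two" -> "forty two", "42" -> " four  two "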
    return ''.join((char_mapping.get(c, c) for c in text))


def tokens2int(tokens, numwords={}):
    """Convert a sequence of English number-word tokens into an integer string.

    The mutable ``numwords`` default is intentional: the lookup table is built
    on the first call and reused afterwards.

    >>> tokens2int(["forty", "two"])
    '42'
    >>> text2int("nine")
    '9'
    >>> text2int("one hundred forty two")
    '142'
    """
    if not numwords:

        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

        scales = ["hundred", "thousand", "million", "billion", "trillion"]

        numwords["and"] = (1, 0)
        for idx, word in enumerate(ONES):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            numwords[word] = (10 ** (idx * 3 or 2), 0)

    current = result = 0

    for word in tokens:
        if word not in numwords:
            raise ValueError("Unknown number word: " + word)

        scale, increment = numwords[word]
        current = current * scale + increment
        if scale > 100:
            result += current
            current = 0

    return str(result + current)


def text2int(text):
    return tokens2int(tokenize(replace_chars(text)))


def try_text2int(text):
    text = str(text)
    try:
        intstr = text2int(text)
    except Exception as e:
        log.error(str(e))
        log.error(f'User input: {text}')
        intstr = TOKENS2INT_ERROR_INT
    return str(intstr)


def try_text2int_preprocessed(text):
    text = str(text)
    try:
        tokens = replace_tokens(tokenize(replace_chars(str(text))))
    except Exception as e:
        log.error(str(e))
        tokens = text.split()
    try:
        intstr = tokens2int(tokens)
    except Exception as e:
        log.error(str(e))
        intstr = str(TOKENS2INT_ERROR_INT)
    return intstr


# Re-implementation of gradio.routes.get_types, which parses component
# docstrings to infer input/output types for the auto-generated API docs.
def get_types(cls_set: List[Type], component: str):
    docset = []
    types = []
    if component == "input":
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[1].split(":")[-1])
            types.append(doc_lines[1].split(")")[0].split("(")[-1])
    else:
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[-1].split(":")[-1])
            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
    return docset, types


routes.get_types = get_types  # patch it in so the API docs route uses the version above

with gr.Blocks() as html_block:
    gr.Markdown("# Gradio Blocks (3.0) with REST API")
    textbox_input = gr.Textbox(
        value="forty-two",
        label="Input number words:",
    )
    button_text2int = gr.Button("text2int")
    button_text2int_preprocessed = gr.Button("text2int with preprocessing")
    textbox_output = gr.Textbox(
        value="42",
        label="Output integer:"
    )
    button_text2int.click(try_text2int, inputs=[textbox_input], outputs=[textbox_output], api_name="text2int")
    button_text2int_preprocessed.click(try_text2int_preprocessed, inputs=[textbox_input], outputs=[textbox_output], api_name="text2int_preprocessed")
    gr.Markdown(r"""
## API
You can select which function to run using the `fn_index` argument:
```python
import requests
requests.post(
    url="https://cetinca-mathtext-nlu.hf.space/run/predict/run/predict", json={"data": ["one hundred forty-two"], "fn_index": 0}
).json()
```
Or using `curl`:
```bash
curl -X POST https://cetinca-mathtext-nlu.hf.space/run/predict/ -H 'Content-Type: application/json' -d '{"data": ["one hundred forty-two"], "fn_index": 0}'
```
""" + f"{json.loads(BQ_JSON)['type']}")

# A throwaway Interface supplies the component metadata that gradio's
# auto-generated API docs expect to find on the launched app.
interface = gr.Interface(lambda: None, inputs=[textbox_input], outputs=[textbox_output])

html_block.input_components = interface.input_components
html_block.output_components = interface.output_components
html_block.examples = None
html_block.predict_durations = []

bapp = html_block.launch()