Update app.py
Browse files
app.py
CHANGED
@@ -1,95 +1,52 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
import gradio as gr
|
4 |
import requests
|
5 |
-
import
|
6 |
-
import
|
7 |
|
8 |
-
##Bloom
|
9 |
-
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom-350m"
|
10 |
-
# HF_TOKEN = os.environ["HF_TOKEN"]
|
11 |
-
# headers = {"Authorization": f"Bearer {HF_TOKEN}"}
|
12 |
|
13 |
-
|
|
|
|
|
|
|
14 |
|
15 |
-
|
16 |
-
if len(prompt) == 0:
|
17 |
-
prompt = input_prompt
|
18 |
-
|
19 |
-
json_ = {
|
20 |
-
"inputs": prompt,
|
21 |
-
"parameters": {
|
22 |
-
"top_p": 0.9,
|
23 |
-
"temperature": 1.1,
|
24 |
-
"max_new_tokens": 250,
|
25 |
-
"return_full_text": False,
|
26 |
-
"do_sample": False,
|
27 |
-
"seed": seed,
|
28 |
-
"early_stopping": False,
|
29 |
-
"length_penalty": 0.0,
|
30 |
-
"eos_token_id": None,
|
31 |
-
},
|
32 |
-
"options": {
|
33 |
-
"use_cache": True,
|
34 |
-
"wait_for_model": True,
|
35 |
-
},
|
36 |
-
}
|
37 |
-
response = requests.request("POST", API_URL, json=json_) # headers=headers
|
38 |
-
# output = response.json()
|
39 |
-
output = json.loads(response.content.decode("utf-8"))
|
40 |
-
output_tmp = output[0]['generated_text']
|
41 |
-
solution = output_tmp.split(f"\n{to_lang}:")[0]
|
42 |
-
|
43 |
|
44 |
-
if '\n\n' in solution:
|
45 |
-
final_solution = solution.split("\n\n")[0]
|
46 |
-
else:
|
47 |
-
final_solution = solution
|
48 |
-
return final_solution
|
49 |
|
50 |
-
|
|
|
51 |
|
52 |
-
|
53 |
-
gr.Markdown("<h1><center>Translate with Bloom</center></h1>")
|
54 |
-
gr.Markdown('''
|
55 |
-
## Model Details
|
56 |
-
BLOOM is an autoregressive Large Language Model (LLM), trained to continue text
|
57 |
-
from a prompt on vast amounts of text data using industrial-scale computational
|
58 |
-
resources. As such, it is able to output coherent text in 46 languages and 13
|
59 |
-
programming languages that is hardly distinguishable from text written by humans.
|
60 |
-
BLOOM can also be instructed to perform text tasks it hasn't been explicitly trained
|
61 |
-
for, by casting them as text generation tasks.
|
62 |
|
63 |
-
|
64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
|
66 |
-
|
67 |
-
At the moment this space has only capacity to translate between English, Spanish and Hindi languages.
|
68 |
-
from languange is the languge you put in text box and to langauge is to what language you are intended to translate.
|
69 |
-
Select from language from the drop down.
|
70 |
-
Select to language from the drop down.
|
71 |
|
72 |
-
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
with gr.Row():
|
78 |
-
|
79 |
-
|
80 |
-
label='select From language : ')
|
81 |
-
to_lang = gr.Dropdown(['English', 'Spanish', 'Hindi'],
|
82 |
-
value='Hindi',
|
83 |
-
label= 'select to Language : ')
|
84 |
|
85 |
-
|
86 |
-
value=f"Instruction: ... \ninput: \"from sentence\" \n{to_lang} :",
|
87 |
-
lines=6)
|
88 |
|
89 |
-
|
90 |
|
91 |
-
|
92 |
-
|
93 |
|
94 |
demo.launch(enable_queue=True, debug=True)
|
95 |
-
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
import requests
|
3 |
+
import json
|
4 |
+
import os
|
5 |
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
# Languages offered in the UI dropdowns.  Entries must be clean (no stray
# whitespace): translate() lower-cases the value and interpolates it directly
# into the model prompt, so a leading space would corrupt the prompt.
# (Fixed: ' Assamese', ' Basque', ' Gujarati' previously had leading spaces.)
LANGUAGES = ['Akan', 'Arabic', 'Assamese', 'Bambara', 'Bengali', 'Catalan', 'English', 'Spanish', 'Basque', 'French', 'Gujarati', 'Hindi',
             'Indonesian', 'Igbo', 'Kikuyu', 'Kannada', 'Ganda', 'Lingala', 'Malayalam', 'Marathi', 'Nepali', 'Chichewa', 'Oriya', 'Panjabi', 'Portuguese',
             'Kirundi', 'Kinyarwanda', 'Shona', 'Sotho', 'Swahili', 'Tamil', 'Telugu', 'Tswana', 'Tsonga', 'Twi', 'Urdu', 'Viêt Namese', 'Wolof', 'Xhosa',
             'Yoruba', 'Chinese', 'Zulu']

# Hosted Inference API endpoint for the full BLOOM model.
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
+
def translate(input, output, text):
    """Translate *text* from the *input* language into the *output* language.

    Parameters are wired positionally by the gradio ``Button.click`` call:
    ``input`` and ``output`` are language names from ``LANGUAGES``, ``text``
    is the sentence to translate.  Returns the model's translation as a
    plain string.  Raises ``RuntimeError`` if the Inference API returns an
    error object instead of generations.

    NOTE: ``input`` shadows the builtin; the parameter names are kept
    unchanged for caller compatibility.
    """
    # Prompt BLOOM so that it continues the "Translation in <lang>:" line.
    instruction = f"""Translation in {input.lower()}: {text}\nTranslation in {output.lower()}: """

    payload = {
        "inputs": instruction,
        "parameters": {
            # Full text comes back so the echoed prompt can be stripped below.
            "return_full_text": True,
            "do_sample": False,  # greedy decoding -> deterministic output
        },
        "options": {
            "use_cache": True,
            # Wait for the model to load instead of failing immediately.
            "wait_for_model": True,
        },
    }
    # Timeout so a stalled API call cannot hang the UI forever.
    response = requests.post(API_URL, json=payload, timeout=120)
    result = response.json()

    # On errors (rate limit, loading failure) the API returns a JSON object,
    # not a list of generations; surface that instead of a bare KeyError.
    if not isinstance(result, list):
        raise RuntimeError(f"Inference API error: {result}")

    # Use a fresh local instead of clobbering the `output` parameter,
    # as the original code did.
    generated = result[0]['generated_text']
    # Drop the echoed prompt plus the single space that follows the colon.
    return generated.replace(instruction, '', 1)[1:]
|
|
|
|
|
|
|
|
|
35 |
|
36 |
+
# Assemble the demo UI: language selectors, input/output boxes, and a
# button wired to translate().
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Translate with Bloom</center></h1>")

    # Source and target language pickers, side by side.
    with gr.Row():
        input_lang = gr.Dropdown(LANGUAGES, value='English', label='Select input language')
        output_lang = gr.Dropdown(LANGUAGES, value='French', label='Select output language')

    input_text = gr.Textbox(label="Input", lines=6)
    output_text = gr.Textbox(lines=6, label="Output")

    # Clicking runs translate(input_lang, output_lang, input_text) and
    # writes the result into the output box.
    translate_button = gr.Button("translate")
    translate_button.click(translate, inputs=[input_lang, output_lang, input_text], outputs=output_text)

demo.launch(enable_queue=True, debug=True)
|
|