File size: 750 Bytes
422fadf
 
 
e8a902a
 
 
 
 
422fadf
e8a902a
f003591
422fadf
f003591
 
da90dd3
e48d20b
8b7fa33
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
import streamlit as st
from tokenizers.tools import EncodingVisualizer
from transformers import AutoTokenizer

# NOTE(review): st.cache(allow_output_mutation=True) is deprecated and removed in
# recent Streamlit; st.cache_resource is the documented replacement for caching
# unserializable singletons such as tokenizers/models.
@st.cache_resource
def load_tokenizer(model_ckpt):
    """Load and cache the Hub tokenizer for *model_ckpt*.

    The Hugging Face auth token is read from Streamlit secrets under the
    ``AUTH_TOKEN`` key, so gated/private checkpoints can be fetched.
    """
    # `token=` supersedes the deprecated `use_auth_token=` keyword in transformers.
    return AutoTokenizer.from_pretrained(model_ckpt, token=st.secrets["AUTH_TOKEN"])

# Page setup: a wide-layout Streamlit app that shows how the BigScience
# tokenizer splits a piece of text.
st.set_page_config(page_title="BigScience Tokenizer", page_icon='👩‍💻', layout="wide")

tokenizer = load_tokenizer('bigscience/tokenizer')
# EncodingVisualizer operates on the underlying fast (Rust) tokenizer object.
visualizer = EncodingVisualizer(tokenizer=tokenizer._tokenizer, default_to_notebook=False)

user_text = st.text_area(label="", placeholder="Text to tokenize")
tokenize_clicked = st.button("Tokenize")

# Render results when there is input text or the button was pressed explicitly.
if user_text or tokenize_clicked:
    token_count = len(tokenizer.tokenize(user_text))
    st.write(token_count)
    # Embed the HTML the visualizer produces for the tokenized text.
    st.components.v1.html(visualizer(user_text), height=1500)