import json
import urllib.request

from tokenizers import models, pre_tokenizers, trainers, Tokenizer

# WordPiece model; out-of-vocabulary tokens map to [UNK].
tokenizer = Tokenizer(model=models.WordPiece(unk_token="[UNK]"))
# Pre-tokenize on whitespace and punctuation so whole articles are not fed to the
# trainer as single "words".
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)

# Tokenizer.train() only accepts paths to local text files, so fetch a small sample
# of Wikipedia rows from the datasets-server API and train from an in-memory iterator
# (assumes each returned row carries the article body in a "text" field).
url = ("https://datasets-server.huggingface.co/rows"
       "?dataset=wikimedia%2Fwikipedia&config=20231101.en&split=train&offset=0&length=100")
with urllib.request.urlopen(url) as response:
    rows = json.load(response)["rows"]
tokenizer.train_from_iterator((row["row"]["text"] for row in rows), trainer=trainer)

encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
print(encoding.ids)
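
# Not part of the original snippet: a minimal sketch of how the [CLS]/[SEP] special
# tokens reserved above could actually be inserted. encode() adds no special tokens
# until a post-processor is attached; TemplateProcessing gives the usual BERT-style
# single/pair layout.
from tokenizers import processors

cls_id = tokenizer.token_to_id("[CLS]")
sep_id = tokenizer.token_to_id("[SEP]")
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", cls_id), ("[SEP]", sep_id)],
)
# The same sentence pair now comes back wrapped as [CLS] ... [SEP] ... [SEP].
print(tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.").tokens)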