Itsme5 committed (verified)
Commit 2b485f2
1 Parent(s): b98ac79

Update app.py

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -5,7 +5,8 @@ tokenizer = Tokenizer(model=models.WordPiece(unk_token="[UNK]"))
 special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
 trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)
 
-tokenizer.train(["wikitext-2.txt"], trainer=trainer)
+tokenizer.train(["curl -X GET \
+"https://datasets-server.huggingface.co/rows?dataset=wikimedia%2Fwikipedia&config=20231101.en&split=train&offset=0&length=100""], trainer=trainer)
 
 encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
 print(encoding.ids)
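
Note on the change: as committed, the new call passes a curl command string to tokenizer.train(), which expects a list of local file paths, so it will raise an error rather than fetch anything. Below is a minimal sketch of one way to feed the same datasets-server endpoint into the trainer, assuming the JSON response exposes a "rows" list whose entries carry the article body under row["text"], and reusing the tokenizer and trainer already defined in app.py:

import requests

url = (
    "https://datasets-server.huggingface.co/rows"
    "?dataset=wikimedia%2Fwikipedia&config=20231101.en&split=train&offset=0&length=100"
)
response = requests.get(url, timeout=30)
response.raise_for_status()

# Each entry in "rows" wraps the actual record under the "row" key; the
# Wikipedia config is assumed to store the article body in a "text" field.
texts = (item["row"]["text"] for item in response.json()["rows"])

# train_from_iterator accepts any iterator of strings, so no local file is needed.
tokenizer.train_from_iterator(texts, trainer=trainer)

A 100-row sample is far smaller than wikitext-2, so a 25,000-token vocabulary is unlikely to be filled; paging through further offsets, or downloading the dataset with the datasets library, would give the trainer substantially more text.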