Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -5,8 +5,9 @@ tokenizer = Tokenizer(model=models.WordPiece(unk_token="[UNK]"))
 special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
 trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)
 
-tokenizer.train(["curl -X GET \
-"https://datasets-server.huggingface.co/rows?dataset=wikimedia%2Fwikipedia&config=20231101.en&split=train&offset=0&length=100""], trainer=trainer)
+tokenizer.train(["https://datasets-server.huggingface.co/rows?dataset=wikimedia%2Fwikipedia&config=20231101.en&split=train&offset=0&length=100"],
+trainer=trainer)
+
 
 encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
 print(encoding.ids)
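
Note that even after this change, Tokenizer.train() in the tokenizers library only accepts local file paths, so passing the datasets-server URL directly will fail at runtime rather than fetch any data. Below is a minimal sketch of one workaround: download the rows over HTTP and feed the texts to train_from_iterator() instead. The response shape (a "rows" list whose items hold the article text under row["row"]["text"]) follows the documented datasets-server /rows format, and the Whitespace pre-tokenizer is an assumption; the hunk above does not show which pre-tokenizer app.py configures.

import requests
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

URL = ("https://datasets-server.huggingface.co/rows"
       "?dataset=wikimedia%2Fwikipedia&config=20231101.en"
       "&split=train&offset=0&length=100")

tokenizer = Tokenizer(model=models.WordPiece(unk_token="[UNK]"))
# Assumption: whitespace pre-tokenization, as in the tokenizers quicktour.
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)

# Fetch one page of 100 rows; the "rows" -> "row" -> "text" path follows
# the documented datasets-server response format for wikimedia/wikipedia.
payload = requests.get(URL, timeout=30).json()
texts = [item["row"]["text"] for item in payload["rows"]]

# train_from_iterator() accepts in-memory strings, unlike train(),
# which only reads from files on disk.
tokenizer.train_from_iterator(texts, trainer=trainer)

encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
print(encoding.ids)

An alternative that keeps the train() call from the diff would be to write the fetched texts to a temporary file and pass that path instead; either way the URL itself cannot be handed to train() directly.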