dp2022/WordPieceTokenizerTrainer.py
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.trainers import WordPieceTrainer
from tokenizers.pre_tokenizers import Whitespace
# Build a WordPiece tokenizer; out-of-vocabulary subwords map to [UNK].
tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
# Reserve the standard BERT-style special tokens so they receive fixed vocabulary ids.
trainer = WordPieceTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# Split input on whitespace before WordPiece runs; this must be set before training.
tokenizer.pre_tokenizer = Whitespace()
# files = [f"raw/eceuropa.{split}.raw" for split in ["test", "train", "valid"]]
files = ["raw/eujournal.sk.raw"]
# Learn the WordPiece vocabulary from the raw text files and save it to disk.
tokenizer.train(files, trainer)
tokenizer.save("wordpiece-tokenizer-eujournal-sk.json")
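
# Quick sanity check (a sketch, not part of the original script): reload the
# saved tokenizer and encode a sample sentence. Tokenizer.from_file() and
# encode() are part of the tokenizers API; the Slovak example sentence is
# arbitrary.
loaded = Tokenizer.from_file("wordpiece-tokenizer-eujournal-sk.json")
encoding = loaded.encode("Úradný vestník Európskej únie")
print(encoding.tokens)  # learned subwords; continuation pieces are prefixed with ##
print(encoding.ids)     # corresponding vocabulary ids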