@@ -446,27 +446,24 @@
 # tokenizers-backed objects
 if is_tokenizers_available():
     # Fast tokenizers
-    _import_structure["models.realm"].append("RealmTokenizerFast")
-    _import_structure["models.xglm"].append("XGLMTokenizerFast")
-    _import_structure["models.fnet"].append("FNetTokenizerFast")
-    _import_structure["models.roformer"].append("RoFormerTokenizerFast")
-    _import_structure["models.clip"].append("CLIPTokenizerFast")
-    _import_structure["models.convbert"].append("ConvBertTokenizerFast")
-    _import_structure["models.blenderbot_small"].append("BlenderbotSmallTokenizerFast")
     _import_structure["models.albert"].append("AlbertTokenizerFast")
     _import_structure["models.bart"].append("BartTokenizerFast")
     _import_structure["models.barthez"].append("BarthezTokenizerFast")
     _import_structure["models.bert"].append("BertTokenizerFast")
     _import_structure["models.big_bird"].append("BigBirdTokenizerFast")
     _import_structure["models.blenderbot"].append("BlenderbotTokenizerFast")
+    _import_structure["models.blenderbot_small"].append("BlenderbotSmallTokenizerFast")
     _import_structure["models.camembert"].append("CamembertTokenizerFast")
+    _import_structure["models.clip"].append("CLIPTokenizerFast")
+    _import_structure["models.convbert"].append("ConvBertTokenizerFast")
     _import_structure["models.deberta"].append("DebertaTokenizerFast")
     _import_structure["models.deberta_v2"].append("DebertaV2TokenizerFast")
     _import_structure["models.distilbert"].append("DistilBertTokenizerFast")
     _import_structure["models.dpr"].extend(
         ["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"]
     )
     _import_structure["models.electra"].append("ElectraTokenizerFast")
+    _import_structure["models.fnet"].append("FNetTokenizerFast")
     _import_structure["models.funnel"].append("FunnelTokenizerFast")
     _import_structure["models.gpt2"].append("GPT2TokenizerFast")
     _import_structure["models.herbert"].append("HerbertTokenizerFast")
@@ -483,13 +480,16 @@
     _import_structure["models.mt5"].append("MT5TokenizerFast")
     _import_structure["models.openai"].append("OpenAIGPTTokenizerFast")
     _import_structure["models.pegasus"].append("PegasusTokenizerFast")
+    _import_structure["models.realm"].append("RealmTokenizerFast")
     _import_structure["models.reformer"].append("ReformerTokenizerFast")
     _import_structure["models.rembert"].append("RemBertTokenizerFast")
     _import_structure["models.retribert"].append("RetriBertTokenizerFast")
     _import_structure["models.roberta"].append("RobertaTokenizerFast")
+    _import_structure["models.roformer"].append("RoFormerTokenizerFast")
     _import_structure["models.splinter"].append("SplinterTokenizerFast")
     _import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast")
     _import_structure["models.t5"].append("T5TokenizerFast")
+    _import_structure["models.xglm"].append("XGLMTokenizerFast")
     _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast")
     _import_structure["models.xlnet"].append("XLNetTokenizerFast")
     _import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"]
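For context, _import_structure is the registry behind transformers' lazy top-level imports: each key names a submodule, each value lists the public names it exports, and a submodule is only imported when one of those names is first accessed (in transformers this is routed through a _LazyModule helper). The snippet below is a minimal sketch of that pattern using PEP 562's module-level __getattr__, not the actual transformers implementation; DEMO_IMPORT_STRUCTURE and _NAME_TO_MODULE are illustrative names invented for this sketch.

import importlib

# Sketch: this would live in a package's __init__.py. The registry maps
# submodule path -> list of public names defined there, mirroring the shape
# of _import_structure in the diff above. DEMO_IMPORT_STRUCTURE is a
# made-up name for illustration, not a transformers internal.
DEMO_IMPORT_STRUCTURE = {
    "models.bert": ["BertTokenizerFast"],
    "models.gpt2": ["GPT2TokenizerFast"],
}

# Invert the registry once: public name -> submodule that defines it.
_NAME_TO_MODULE = {
    name: module for module, names in DEMO_IMPORT_STRUCTURE.items() for name in names
}

def __getattr__(name):
    # PEP 562: Python calls this for attributes not found on the module, so
    # a submodule is imported only on first access of one of its names.
    if name in _NAME_TO_MODULE:
        submodule = importlib.import_module("." + _NAME_TO_MODULE[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With the real registry in place, "from transformers import BertTokenizerFast" triggers the import of transformers.models.bert only at that point, which is why entries like the ones in this diff can be reordered alphabetically with no import-time cost.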