Skip to content

Commit 21e86f9

Browse files
sgugger and LysandreJik
authored
Sort init import (#10801)
* Initial script * Add script to properly sort imports in init. * Add to the CI * Update utils/custom_init_isort.py Co-authored-by: Lysandre Debut <[email protected]> * Separate scripts that change content from quality * Move class_mapping_update to style_checks Co-authored-by: Lysandre Debut <[email protected]>
1 parent 1438c48 commit 21e86f9

File tree

15 files changed

+355
-110
lines changed

15 files changed

+355
-110
lines changed

.circleci/config.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -383,6 +383,7 @@ jobs:
383383
- '~/.cache/pip'
384384
- run: black --check examples tests src utils
385385
- run: isort --check-only examples tests src utils
386+
- run: python utils/custom_init_isort.py --check_only
386387
- run: flake8 examples tests src utils
387388
- run: python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
388389
- run: python utils/check_copies.py

Makefile

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,32 +21,36 @@ deps_table_update:
2121

2222
# Check that source code meets quality standards
2323

24-
extra_quality_checks: deps_table_update
24+
extra_quality_checks:
2525
python utils/check_copies.py
2626
python utils/check_table.py
2727
python utils/check_dummies.py
2828
python utils/check_repo.py
29-
python utils/style_doc.py src/transformers docs/source --max_len 119
30-
python utils/class_mapping_update.py
3129

3230
# this target runs checks on all files
3331
quality:
3432
black --check $(check_dirs)
3533
isort --check-only $(check_dirs)
34+
python utils/custom_init_isort.py --check_only
3635
flake8 $(check_dirs)
37-
python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
3836
${MAKE} extra_quality_checks
3937

4038
# Format source code automatically and check is there are any problems left that need manual fixing
4139

42-
style: deps_table_update
40+
extra_style_checks: deps_table_update
41+
python utils/custom_init_isort.py
42+
python utils/style_doc.py src/transformers docs/source --max_len 119
43+
python utils/class_mapping_update.py
44+
45+
# this target runs checks on all files
46+
style:
4347
black $(check_dirs)
4448
isort $(check_dirs)
45-
python utils/style_doc.py src/transformers docs/source --max_len 119
49+
${MAKE} extra_style_checks
4650

4751
# Super fast fix and check target that only works on relevant modified files since the branch was made
4852

49-
fixup: modified_only_fixup extra_quality_checks
53+
fixup: modified_only_fixup extra_style_checks extra_quality_checks
5054

5155
# Make marked copies of snippets of codes conform to the original
5256

src/transformers/__init__.py

Lines changed: 81 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@
7878
"xnli_processors",
7979
"xnli_tasks_num_labels",
8080
],
81+
"feature_extraction_sequence_utils": ["BatchFeature", "SequenceFeatureExtractor"],
8182
"file_utils": [
8283
"CONFIG_NAME",
8384
"MODEL_CARD_NAME",
@@ -124,23 +125,8 @@
124125
"load_tf2_model_in_pytorch_model",
125126
"load_tf2_weights_in_pytorch_model",
126127
],
127-
"models": [],
128128
# Models
129-
"models.wav2vec2": [
130-
"WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
131-
"Wav2Vec2Config",
132-
"Wav2Vec2CTCTokenizer",
133-
"Wav2Vec2Tokenizer",
134-
"Wav2Vec2FeatureExtractor",
135-
"Wav2Vec2Processor",
136-
],
137-
"models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
138-
"models.speech_to_text": [
139-
"SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
140-
"Speech2TextConfig",
141-
"Speech2TextFeatureExtractor",
142-
],
143-
"models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
129+
"models": [],
144130
"models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
145131
"models.auto": [
146132
"ALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
@@ -169,6 +155,7 @@
169155
"BlenderbotSmallTokenizer",
170156
],
171157
"models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"],
158+
"models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
172159
"models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
173160
"models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"],
174161
"models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
@@ -193,6 +180,7 @@
193180
"models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"],
194181
"models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"],
195182
"models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"],
183+
"models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
196184
"models.marian": ["MarianConfig"],
197185
"models.mbart": ["MBartConfig"],
198186
"models.mmbt": ["MMBTConfig"],
@@ -207,6 +195,11 @@
207195
"models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"],
208196
"models.retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer"],
209197
"models.roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer"],
198+
"models.speech_to_text": [
199+
"SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
200+
"Speech2TextConfig",
201+
"Speech2TextFeatureExtractor",
202+
],
210203
"models.squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer"],
211204
"models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
212205
"models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"],
@@ -216,6 +209,14 @@
216209
"TransfoXLCorpus",
217210
"TransfoXLTokenizer",
218211
],
212+
"models.wav2vec2": [
213+
"WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
214+
"Wav2Vec2Config",
215+
"Wav2Vec2CTCTokenizer",
216+
"Wav2Vec2FeatureExtractor",
217+
"Wav2Vec2Processor",
218+
"Wav2Vec2Tokenizer",
219+
],
219220
"models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"],
220221
"models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
221222
"models.xlm_roberta": ["XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig"],
@@ -251,7 +252,6 @@
251252
"SpecialTokensMixin",
252253
"TokenSpan",
253254
],
254-
"feature_extraction_sequence_utils": ["SequenceFeatureExtractor", "BatchFeature"],
255255
"trainer_callback": [
256256
"DefaultFlowCallback",
257257
"EarlyStoppingCallback",
@@ -383,54 +383,14 @@
383383
"TopPLogitsWarper",
384384
]
385385
_import_structure["generation_stopping_criteria"] = [
386-
"StoppingCriteria",
387-
"StoppingCriteriaList",
388386
"MaxLengthCriteria",
389387
"MaxTimeCriteria",
388+
"StoppingCriteria",
389+
"StoppingCriteriaList",
390390
]
391391
_import_structure["generation_utils"] = ["top_k_top_p_filtering"]
392392
_import_structure["modeling_utils"] = ["Conv1D", "PreTrainedModel", "apply_chunking_to_forward", "prune_layer"]
393393
# PyTorch models structure
394-
395-
_import_structure["models.speech_to_text"].extend(
396-
[
397-
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
398-
"Speech2TextForConditionalGeneration",
399-
"Speech2TextModel",
400-
]
401-
)
402-
403-
_import_structure["models.wav2vec2"].extend(
404-
[
405-
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
406-
"Wav2Vec2ForCTC",
407-
"Wav2Vec2ForMaskedLM",
408-
"Wav2Vec2Model",
409-
"Wav2Vec2PreTrainedModel",
410-
]
411-
)
412-
_import_structure["models.m2m_100"].extend(
413-
[
414-
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
415-
"M2M100ForConditionalGeneration",
416-
"M2M100Model",
417-
]
418-
)
419-
420-
_import_structure["models.convbert"].extend(
421-
[
422-
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
423-
"ConvBertForMaskedLM",
424-
"ConvBertForMultipleChoice",
425-
"ConvBertForQuestionAnswering",
426-
"ConvBertForSequenceClassification",
427-
"ConvBertForTokenClassification",
428-
"ConvBertLayer",
429-
"ConvBertModel",
430-
"ConvBertPreTrainedModel",
431-
"load_tf_weights_in_convbert",
432-
]
433-
)
434394
_import_structure["models.albert"].extend(
435395
[
436396
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -512,17 +472,17 @@
512472
_import_structure["models.blenderbot"].extend(
513473
[
514474
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
475+
"BlenderbotForCausalLM",
515476
"BlenderbotForConditionalGeneration",
516477
"BlenderbotModel",
517-
"BlenderbotForCausalLM",
518478
]
519479
)
520480
_import_structure["models.blenderbot_small"].extend(
521481
[
522482
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
483+
"BlenderbotSmallForCausalLM",
523484
"BlenderbotSmallForConditionalGeneration",
524485
"BlenderbotSmallModel",
525-
"BlenderbotSmallForCausalLM",
526486
]
527487
)
528488
_import_structure["models.camembert"].extend(
@@ -537,6 +497,20 @@
537497
"CamembertModel",
538498
]
539499
)
500+
_import_structure["models.convbert"].extend(
501+
[
502+
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
503+
"ConvBertForMaskedLM",
504+
"ConvBertForMultipleChoice",
505+
"ConvBertForQuestionAnswering",
506+
"ConvBertForSequenceClassification",
507+
"ConvBertForTokenClassification",
508+
"ConvBertLayer",
509+
"ConvBertModel",
510+
"ConvBertPreTrainedModel",
511+
"load_tf_weights_in_convbert",
512+
]
513+
)
540514
_import_structure["models.ctrl"].extend(
541515
[
542516
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -549,23 +523,23 @@
549523
_import_structure["models.deberta"].extend(
550524
[
551525
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
526+
"DebertaForMaskedLM",
527+
"DebertaForQuestionAnswering",
552528
"DebertaForSequenceClassification",
529+
"DebertaForTokenClassification",
553530
"DebertaModel",
554-
"DebertaForMaskedLM",
555531
"DebertaPreTrainedModel",
556-
"DebertaForTokenClassification",
557-
"DebertaForQuestionAnswering",
558532
]
559533
)
560534
_import_structure["models.deberta_v2"].extend(
561535
[
562536
"DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
537+
"DebertaV2ForMaskedLM",
538+
"DebertaV2ForQuestionAnswering",
563539
"DebertaV2ForSequenceClassification",
540+
"DebertaV2ForTokenClassification",
564541
"DebertaV2Model",
565-
"DebertaV2ForMaskedLM",
566542
"DebertaV2PreTrainedModel",
567-
"DebertaV2ForTokenClassification",
568-
"DebertaV2ForQuestionAnswering",
569543
]
570544
)
571545
_import_structure["models.distilbert"].extend(
@@ -699,7 +673,14 @@
699673
"LxmertXLayer",
700674
]
701675
)
702-
_import_structure["models.marian"].extend(["MarianModel", "MarianMTModel", "MarianForCausalLM"])
676+
_import_structure["models.m2m_100"].extend(
677+
[
678+
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
679+
"M2M100ForConditionalGeneration",
680+
"M2M100Model",
681+
]
682+
)
683+
_import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"])
703684
_import_structure["models.mbart"].extend(
704685
[
705686
"MBartForCausalLM",
@@ -752,7 +733,7 @@
752733
]
753734
)
754735
_import_structure["models.pegasus"].extend(
755-
["PegasusForConditionalGeneration", "PegasusModel", "PegasusForCausalLM"]
736+
["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel"]
756737
)
757738
_import_structure["models.prophetnet"].extend(
758739
[
@@ -793,6 +774,13 @@
793774
"RobertaModel",
794775
]
795776
)
777+
_import_structure["models.speech_to_text"].extend(
778+
[
779+
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
780+
"Speech2TextForConditionalGeneration",
781+
"Speech2TextModel",
782+
]
783+
)
796784
_import_structure["models.squeezebert"].extend(
797785
[
798786
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -836,6 +824,15 @@
836824
"load_tf_weights_in_transfo_xl",
837825
]
838826
)
827+
_import_structure["models.wav2vec2"].extend(
828+
[
829+
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
830+
"Wav2Vec2ForCTC",
831+
"Wav2Vec2ForMaskedLM",
832+
"Wav2Vec2Model",
833+
"Wav2Vec2PreTrainedModel",
834+
]
835+
)
839836
_import_structure["models.xlm"].extend(
840837
[
841838
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -916,20 +913,6 @@
916913
"shape_list",
917914
]
918915
# TensorFlow models structure
919-
920-
_import_structure["models.convbert"].extend(
921-
[
922-
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
923-
"TFConvBertForMaskedLM",
924-
"TFConvBertForMultipleChoice",
925-
"TFConvBertForQuestionAnswering",
926-
"TFConvBertForSequenceClassification",
927-
"TFConvBertForTokenClassification",
928-
"TFConvBertLayer",
929-
"TFConvBertModel",
930-
"TFConvBertPreTrainedModel",
931-
]
932-
)
933916
_import_structure["models.albert"].extend(
934917
[
935918
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1002,6 +985,19 @@
1002985
"TFCamembertModel",
1003986
]
1004987
)
988+
_import_structure["models.convbert"].extend(
989+
[
990+
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
991+
"TFConvBertForMaskedLM",
992+
"TFConvBertForMultipleChoice",
993+
"TFConvBertForQuestionAnswering",
994+
"TFConvBertForSequenceClassification",
995+
"TFConvBertForTokenClassification",
996+
"TFConvBertLayer",
997+
"TFConvBertModel",
998+
"TFConvBertPreTrainedModel",
999+
]
1000+
)
10051001
_import_structure["models.ctrl"].extend(
10061002
[
10071003
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1108,7 +1104,7 @@
11081104
"TFLxmertVisualFeatureEncoder",
11091105
]
11101106
)
1111-
_import_structure["models.marian"].extend(["TFMarianMTModel", "TFMarianModel"])
1107+
_import_structure["models.marian"].extend(["TFMarianModel", "TFMarianMTModel"])
11121108
_import_structure["models.mbart"].extend(["TFMBartForConditionalGeneration", "TFMBartModel"])
11131109
_import_structure["models.mobilebert"].extend(
11141110
[
@@ -2170,7 +2166,7 @@
21702166
TFLxmertPreTrainedModel,
21712167
TFLxmertVisualFeatureEncoder,
21722168
)
2173-
from .models.marian import TFMarian, TFMarianMTModel
2169+
from .models.marian import TFMarianModel, TFMarianMTModel
21742170
from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel
21752171
from .models.mobilebert import (
21762172
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,

src/transformers/models/blenderbot/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,10 @@
2929
if is_torch_available():
3030
_import_structure["modeling_blenderbot"] = [
3131
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
32+
"BlenderbotForCausalLM",
3233
"BlenderbotForConditionalGeneration",
3334
"BlenderbotModel",
3435
"BlenderbotPreTrainedModel",
35-
"BlenderbotForCausalLM",
3636
]
3737

3838

0 commit comments

Comments (0)