diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6ebfeaf26f..317cf56510 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,20 +22,20 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v2.38.2 hooks: - id: pyupgrade args: [--py36-plus] name: Upgrade code - - repo: https://github.com/myint/docformatter - rev: v1.4 + - repo: https://github.com/PyCQA/docformatter + rev: v1.5.0 hooks: - id: docformatter args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120] - repo: https://github.com/executablebooks/mdformat - rev: 0.7.14 + rev: 0.7.16 hooks: - id: mdformat additional_dependencies: @@ -50,17 +50,17 @@ repos: - id: isort - repo: https://github.com/psf/black - rev: 22.6.0 + rev: 22.8.0 hooks: - id: black name: Format code - repo: https://github.com/asottile/yesqa - rev: v1.3.0 + rev: v1.4.0 hooks: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 5.0.4 hooks: - id: flake8 diff --git a/pl_bolts/models/self_supervised/resnets.py b/pl_bolts/models/self_supervised/resnets.py index 5e229966e7..4d03541792 100644 --- a/pl_bolts/models/self_supervised/resnets.py +++ b/pl_bolts/models/self_supervised/resnets.py @@ -285,10 +285,12 @@ def _resnet(arch, block, layers, pretrained, progress, **kwargs): return model +RESNET_PAPER = '`"Deep Residual Learning for Image Recognition" `_' + + @under_review() def resnet18(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNet-18 model from - `"Deep Residual Learning for Image Recognition" `_ + f"""ResNet-18 model from {RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -299,8 +301,7 @@ def resnet18(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def resnet34(pretrained=False, progress=True, **kwargs): - r"""ResNet-34 model from - `"Deep Residual Learning for Image Recognition" `_ + f"""ResNet-34 model from {RESNET_PAPER} Args: pretrained: 
If True, returns a model pre-trained on ImageNet @@ -311,8 +312,7 @@ def resnet34(pretrained=False, progress=True, **kwargs): @under_review() def resnet50(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNet-50 model from - `"Deep Residual Learning for Image Recognition" `_ + f"""ResNet-50 model from {RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -323,8 +323,7 @@ def resnet50(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def resnet101(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNet-101 model from - `"Deep Residual Learning for Image Recognition" `_ + f"""ResNet-101 model from {RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -335,8 +334,7 @@ def resnet101(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def resnet152(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNet-152 model from - `"Deep Residual Learning for Image Recognition" `_ + f"""ResNet-152 model from {RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -345,10 +343,14 @@ def resnet152(pretrained: bool = False, progress: bool = True, **kwargs): return _resnet("resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) +AGGREGATED_RESNET_PAPER = ( + '`"Aggregated Residual Transformation for Deep Neural Networks" `_' +) + + @under_review() def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ + f"""ResNeXt-50 32x4d model from {AGGREGATED_RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -361,8 +363,7 @@ def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs): - r"""ResNeXt-101 32x8d model from - 
`"Aggregated Residual Transformation for Deep Neural Networks" `_ + f"""ResNeXt-101 32x8d model from {AGGREGATED_RESNET_PAPER} Args: pretrained: If True, returns a model pre-trained on ImageNet @@ -375,11 +376,9 @@ def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs): - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + r"""Wide ResNet-50-2 model from `"Wide Residual Networks" `_ The model is + the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The + number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: @@ -392,11 +391,9 @@ def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs): @under_review() def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs): - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + r"""Wide ResNet-101-2 model from `"Wide Residual Networks" `_ The model is + the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The + number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
Args: diff --git a/pl_bolts/utils/arguments.py b/pl_bolts/utils/arguments.py index f83ffd988f..706e03b372 100644 --- a/pl_bolts/utils/arguments.py +++ b/pl_bolts/utils/arguments.py @@ -78,7 +78,6 @@ def parse_lit_args(self, *args: Any, **kwargs: Any) -> Namespace: @under_review() def gather_lit_args(cls: Any, root_cls: Optional[Any] = None) -> List[LitArg]: - if root_cls is None: if issubclass(cls, LightningModule): root_cls = LightningModule diff --git a/tests/datamodules/test_datamodules.py b/tests/datamodules/test_datamodules.py index c81db6406b..e827147abe 100644 --- a/tests/datamodules/test_datamodules.py +++ b/tests/datamodules/test_datamodules.py @@ -20,7 +20,6 @@ def test_dev_datasets(datadir): - ds = CIFAR10(data_dir=datadir) for _ in ds: pass @@ -48,7 +47,6 @@ def _create_synth_Cityscapes_dataset(path_dir): def test_cityscapes_datamodule(datadir): - _create_synth_Cityscapes_dataset(datadir) batch_size = 1 diff --git a/tests/datasets/test_datasets.py b/tests/datasets/test_datasets.py index b45e987589..882a6cbe1c 100644 --- a/tests/datasets/test_datasets.py +++ b/tests/datasets/test_datasets.py @@ -17,7 +17,6 @@ @pytest.mark.parametrize("batch_size,num_samples", [(16, 100), (1, 0)]) def test_dummy_ds(catch_warnings, batch_size, num_samples): - if num_samples > 0: ds = DummyDataset((1, 28, 28), (1,), num_samples=num_samples) @@ -41,7 +40,6 @@ def test_dummy_ds(catch_warnings, batch_size, num_samples): @pytest.mark.parametrize("batch_size,size,num_samples", [(16, 32, 100), (1, 0, 0)]) def test_rand_dict_ds(catch_warnings, batch_size, size, num_samples): - if num_samples > 0 or size > 0: ds = RandomDictDataset(size, num_samples=num_samples) dl = DataLoader(ds, batch_size=batch_size) @@ -83,7 +81,6 @@ def test_rand_ds(catch_warnings, batch_size, size, num_samples): @pytest.mark.parametrize("batch_size,size,num_samples", [(16, 32, 100), (1, 0, 0)]) def test_rand_str_dict_ds(catch_warnings, batch_size, size, num_samples): - if num_samples > 0 and size > 0: ds = 
RandomDictStringDataset(size=size, num_samples=100) dl = DataLoader(ds, batch_size=batch_size)