Skip to content

Commit

Permalink
Upgrade all files to Python 3.7+ syntax (#486)
Browse files Browse the repository at this point in the history
* Upgrade all files to Python 3.7+ syntax

* Update contributing guide

* Move pyupgrade to correct section
  • Loading branch information
adamjstewart authored Mar 30, 2022
1 parent 83f4be7 commit f20f02a
Show file tree
Hide file tree
Showing 26 changed files with 60 additions and 43 deletions.
14 changes: 14 additions & 0 deletions .github/workflows/style.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,17 @@ jobs:
run: pip install .[style]
- name: Run pydocstyle checks
run: pydocstyle
pyupgrade:
name: pyupgrade
runs-on: ubuntu-latest
steps:
- name: Clone repo
uses: actions/checkout@v2
- name: Set up python
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install pip dependencies
run: pip install .[style]
- name: Run pyupgrade checks
run: pyupgrade --py37-plus $(find . -name "*.py")
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,12 @@ repos:
- id: black
args: [--skip-magic-trailing-comma]

- repo: https://github.com/asottile/pyupgrade
rev: 2.31.1
hooks:
- id: pyupgrade
args: [--py37-plus]

- repo: https://gitlab.com/pycqa/flake8.git
rev: 3.9.2
hooks:
Expand Down
4 changes: 3 additions & 1 deletion docs/user/contributing.rst
Original file line number Diff line number Diff line change
Expand Up @@ -94,14 +94,16 @@ In order to remain `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ complian
* `isort <https://pycqa.github.io/isort/>`_ for import ordering
* `flake8 <https://flake8.pycqa.org/>`_ for code formatting
* `pydocstyle <https://www.pydocstyle.org/>`_ for docstrings
* `pyupgrade <https://github.com/asottile/pyupgrade>`_ for code formatting
* `mypy <https://mypy.readthedocs.io/>`_ for static type analysis

All of these tools should be used from the root of the project to ensure that our configuration files are found. Black and isort are relatively easy to use, and will automatically format your code for you:
All of these tools should be used from the root of the project to ensure that our configuration files are found. Black, isort, and pyupgrade are relatively easy to use, and will automatically format your code for you:

.. code-block:: console
$ black .
$ isort .
$ pyupgrade --py37-plus $(find . -name "*.py")
Flake8, pydocstyle, and mypy won't format your code for you, but they will warn you about potential issues with your code or docstrings:
Expand Down
2 changes: 2 additions & 0 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,8 @@ style =
isort[colors]>=5.8
# pydocstyle 6.1+ required for pyproject.toml support
pydocstyle[toml]>=6.1
# pyupgrade 1.24+ required for --py37-plus flag
pyupgrade>=1.24
# Optional testing requirements
tests =
# mypy 0.900+ required for pyproject.toml support
Expand Down
2 changes: 1 addition & 1 deletion tests/data/globbiomass/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def create_file(path: str, dtype: str, num_channels: int) -> None:
if __name__ == "__main__":

for measurement, file_paths in files.items():
zipfilename = "N00E020_{}.zip".format(measurement)
zipfilename = f"N00E020_{measurement}.zip"
files_to_zip = []
for path in file_paths:
# remove old data
Expand Down
2 changes: 1 addition & 1 deletion tests/data/openbuildings/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def create_meta_data_file(zipfilename):
},
"properties": {
"tile_id": "025",
"tile_url": "polygons_s2_level_4_gzip/{}".format(zipfilename),
"tile_url": f"polygons_s2_level_4_gzip/{zipfilename}",
"size_mb": 0.2,
},
}
Expand Down
2 changes: 1 addition & 1 deletion tests/datasets/test_openbuildings.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def test_no_meta_data_found(self, tmp_path: Path) -> None:

def test_nothing_in_index(self, dataset: OpenBuildings, tmp_path: Path) -> None:
# change meta data to another 'title_url' so that there is no match found
with open(os.path.join(tmp_path, "tiles.geojson"), "r") as f:
with open(os.path.join(tmp_path, "tiles.geojson")) as f:
content = json.load(f)
content["features"][0]["properties"]["tile_url"] = "mismatch.csv.gz"

Expand Down
14 changes: 6 additions & 8 deletions tests/test_train.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

def test_required_args() -> None:
    """Invoking train.py with no arguments must fail with a ConfigKeyError."""
    cmd = [sys.executable, "train.py"]
    completed = subprocess.run(cmd, capture_output=True)
    # A missing required config key should abort the run with a nonzero exit code
    assert completed.returncode != 0
    assert b"ConfigKeyError" in completed.stderr

Expand All @@ -29,7 +29,7 @@ def test_output_file(tmp_path: Path) -> None:
"program.output_dir=" + str(output_file),
"experiment.task=test",
]
ps = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps = subprocess.run(args, capture_output=True)
assert ps.returncode != 0
assert b"NotADirectoryError" in ps.stderr

Expand All @@ -47,7 +47,7 @@ def test_experiment_dir_not_empty(tmp_path: Path) -> None:
"program.output_dir=" + str(output_dir),
"experiment.task=test",
]
ps = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps = subprocess.run(args, capture_output=True)
assert ps.returncode != 0
assert b"FileExistsError" in ps.stderr

Expand All @@ -72,9 +72,7 @@ def test_overwrite_experiment_dir(tmp_path: Path) -> None:
"program.overwrite=True",
"trainer.fast_dev_run=1",
]
ps = subprocess.run(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
)
ps = subprocess.run(args, capture_output=True, check=True)
assert re.search(
b"The experiment directory, .*, already exists, we might overwrite data in it!",
ps.stdout,
Expand All @@ -90,7 +88,7 @@ def test_invalid_task(tmp_path: Path) -> None:
"program.output_dir=" + str(output_dir),
"experiment.task=foo",
]
ps = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps = subprocess.run(args, capture_output=True)
assert ps.returncode != 0
assert b"ValueError" in ps.stderr

Expand All @@ -106,7 +104,7 @@ def test_missing_config_file(tmp_path: Path) -> None:
"experiment.task=test",
"config_file=" + str(config_file),
]
ps = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps = subprocess.run(args, capture_output=True)
assert ps.returncode != 0
assert b"FileNotFoundError" in ps.stderr

Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/advance.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ def __init__(
)

self.files = self._load_files(self.root)
self.classes = sorted(set(f["cls"] for f in self.files))
self.classes = sorted({f["cls"] for f in self.files})
self.class_to_idx: Dict[str, int] = {c: i for i, c in enumerate(self.classes)}

def __getitem__(self, index: int) -> Dict[str, Tensor]:
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/agb_live_woody_density.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def _download(self) -> None:
"""Download the dataset."""
download_url(self.url, self.root, self.base_filename)

with open(os.path.join(self.root, self.base_filename), "r") as f:
with open(os.path.join(self.root, self.base_filename)) as f:
content = json.load(f)

for item in content["features"]:
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/benin_cashews.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,7 +365,7 @@ def _load_mask(self, transform: rasterio.Affine) -> Tensor:
mask_geojson_fn = os.path.join(
self.root, "ts_cashew_benin_labels", "_common", "labels.geojson"
)
with open(mask_geojson_fn, "r") as f:
with open(mask_geojson_fn) as f:
geojson = json.load(f)

labels = [
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/bigearthnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,7 @@ def _load_target(self, index: int) -> Tensor:
folder = self.folders[index]["s1"]

path = glob.glob(os.path.join(folder, "*.json"))[0]
with open(path, "r") as f:
with open(path) as f:
labels = json.load(f)["labels"]

# labels -> indices
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/cms_mangrove_canopy.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ def __init__(
)
self.measurement = measurement

self.filename_glob = "**/Mangrove_{}_{}*".format(self.measurement, self.country)
self.filename_glob = f"**/Mangrove_{self.measurement}_{self.country}*"

self._verify()

Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/eurosat.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def __init__(
self._verify()

valid_fns = set()
with open(os.path.join(self.root, f"eurosat-{split}.txt"), "r") as f:
with open(os.path.join(self.root, f"eurosat-{split}.txt")) as f:
for fn in f:
valid_fns.add(fn.strip().replace(".jpg", ".tif"))
is_in_split: Callable[[str], bool] = lambda x: os.path.basename(x) in valid_fns
Expand Down
4 changes: 2 additions & 2 deletions torchgeo/datasets/globbiomass.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,8 +155,8 @@ def __init__(
)
self.measurement = measurement

self.filename_glob = "*0_{}*.tif".format(self.measurement)
self.zipfile_glob = "*0_{}.zip".format(self.measurement)
self.filename_glob = f"*0_{self.measurement}*.tif"
self.zipfile_glob = f"*0_{self.measurement}.zip"

self._verify()

Expand Down
8 changes: 4 additions & 4 deletions torchgeo/datasets/idtrees.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,10 +289,10 @@ def _load_boxes(self, path: str) -> Tensor:
with rasterio.open(path) as f:
for geom in geoms:
coords = [f.index(x, y) for x, y in geom]
xmin = min([coord[0] for coord in coords])
xmax = max([coord[0] for coord in coords])
ymin = min([coord[1] for coord in coords])
ymax = max([coord[1] for coord in coords])
xmin = min(coord[0] for coord in coords)
xmax = max(coord[0] for coord in coords)
ymin = min(coord[1] for coord in coords)
ymax = max(coord[1] for coord in coords)
boxes.append([xmin, ymin, xmax, ymax])

tensor = torch.tensor(boxes)
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/levircd.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ def _load_files(
"""
files = []
images = glob.glob(os.path.join(root, directory, split, "A", "*.png"))
images = sorted([os.path.basename(image) for image in images])
images = sorted(os.path.basename(image) for image in images)
for image in images:
image1 = os.path.join(root, directory, split, "A", image)
image2 = os.path.join(root, directory, split, "B", image)
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/nasa_marine_debris.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def _load_files(self) -> List[Dict[str, str]]:
image_root = os.path.join(self.root, self.directories[0])
target_root = os.path.join(self.root, self.directories[1])
image_folders = sorted(
[f for f in os.listdir(image_root) if not f.endswith("json")]
f for f in os.listdir(image_root) if not f.endswith("json")
)

files = []
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/openbuildings.py
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@ def _verify(self) -> None:
for zipfile in glob.iglob(pathname):
filename = os.path.basename(zipfile)
if self.checksum and not check_integrity(zipfile, self.md5s[filename]):
raise RuntimeError("Dataset found, but corrupted: {}.".format(filename))
                raise RuntimeError(f"Dataset found, but corrupted: {filename}.")
i += 1

if i != 0:
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/oscd.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def get_image_paths(ind: int) -> List[str]:

with open(os.path.join(images_root, region, "dates.txt")) as f:
dates = tuple(
[line.split()[-1] for line in f.read().strip().splitlines()]
line.split()[-1] for line in f.read().strip().splitlines()
)

regions.append(
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/resisc45.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ def __init__(
self._verify()

valid_fns = set()
with open(os.path.join(self.root, f"resisc45-{split}.txt"), "r") as f:
with open(os.path.join(self.root, f"resisc45-{split}.txt")) as f:
for fn in f:
valid_fns.add(fn.strip())
is_in_split: Callable[[str], bool] = lambda x: os.path.basename(x) in valid_fns
Expand Down
4 changes: 2 additions & 2 deletions torchgeo/datasets/sen12ms.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,9 +261,9 @@ def _load_raster(self, filename: str, source: str) -> Tensor:
with rasterio.open(
os.path.join(
self.root,
"{0}_{1}".format(*parts),
"{}_{}".format(*parts),
"{2}_{3}".format(*parts),
"{0}_{1}_{2}_{3}_{4}".format(*parts),
"{}_{}_{}_{}_{}".format(*parts),
)
) as f:
array = f.read().astype(np.int32)
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/ucmerced.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def __init__(
self._verify()

valid_fns = set()
with open(os.path.join(self.root, f"uc_merced-{split}.txt"), "r") as f:
with open(os.path.join(self.root, f"uc_merced-{split}.txt")) as f:
for fn in f:
valid_fns.add(fn.strip())
is_in_split: Callable[[str], bool] = lambda x: os.path.basename(x) in valid_fns
Expand Down
13 changes: 4 additions & 9 deletions torchgeo/datasets/usavars.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,15 +118,10 @@ def __init__(

self.files = self._load_files()

self.label_dfs = dict(
[
(
lab,
pd.read_csv(os.path.join(self.root, lab + ".csv"), index_col="ID"),
)
for lab in self.labels
]
)
self.label_dfs = {
lab: pd.read_csv(os.path.join(self.root, lab + ".csv"), index_col="ID")
for lab in self.labels
}

def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Expand Down
2 changes: 1 addition & 1 deletion torchgeo/datasets/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ def download_and_extract_archive(
download_url(url, download_root, filename, md5)

archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
print(f"Extracting {archive} to {extract_root}")
extract_archive(archive, extract_root)


Expand Down
2 changes: 1 addition & 1 deletion torchgeo/models/fcsiam.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def __init__(
kernel_size=3,
)
self.classification_head = None
self.name = "u-{}".format(encoder_name)
self.name = f"u-{encoder_name}"
self.initialize()

def forward(self, x: Tensor) -> Tensor:
Expand Down

0 comments on commit f20f02a

Please sign in to comment.