Fix ruff FURB192 (#3785)
* tests/io/cp2k/test_sets.py rename TestSet->TestDftSet

* f-strings

* rename m->match and use walrus op

* tlist -> temperatures

* variable renames

* ruff check . --select FURB192 --preview --fix

* bump ruff, mypy, pyright pre-commit hooks

* collections.defaultdict -> defaultdict everywhere
janosh authored Apr 25, 2024
1 parent f52c921 commit ff94faa
Showing 71 changed files with 582 additions and 593 deletions.
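For context: FURB192 is ruff's `sorted-min-max` rule (ported from refurb, which is why the commit runs ruff with `--preview`). It flags indexing the first or last element of a `sorted()` call. A minimal sketch of the before/after pattern, using made-up data rather than code from this diff:

```python
values = [3.2, -1.5, 7.0, 0.4]  # hypothetical data, for illustration only

# Flagged by FURB192: sorting the whole list (O(n log n)) just to take one end of it.
lowest = sorted(values)[0]
highest = sorted(values)[-1]

# The suggested fix: a single O(n) pass that states the intent directly.
lowest = min(values)
highest = max(values)

assert (lowest, highest) == (-1.5, 7.0)
```

The same rewrite applies when a `key=` argument is involved; several hunks below use it, alongside the unrelated renames and `defaultdict` import cleanup listed in the commit message.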
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -8,7 +8,7 @@ ci:

repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.4.1
+rev: v0.4.2
hooks:
- id: ruff
args: [--fix, --unsafe-fixes]
@@ -22,7 +22,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/pre-commit/mirrors-mypy
-rev: v1.9.0
+rev: v1.10.0
hooks:
- id: mypy

@@ -64,6 +64,6 @@ repos:
args: [--drop-empty-cells, --keep-output]

- repo: https://github.com/RobertCraigie/pyright-python
-rev: v1.1.359
+rev: v1.1.360
hooks:
- id: pyright
6 changes: 3 additions & 3 deletions dev_scripts/update_pt_data.py
@@ -5,9 +5,9 @@

from __future__ import annotations

-import collections
import json
import re
+from collections import defaultdict
from itertools import product

import requests
@@ -154,7 +154,7 @@ def parse_shannon_radii():
sheet = wb["Sheet1"]
i = 2
el = charge = cn = None
-radii = collections.defaultdict(dict)
+radii = defaultdict(dict)
while sheet[f"E{i}"].value:
if sheet[f"A{i}"].value:
el = sheet[f"A{i}"].value
@@ -277,7 +277,7 @@ def add_ionization_energies():
for table in soup.find_all("table"):
if "Hydrogen" in table.text:
break
-data = collections.defaultdict(list)
+data = defaultdict(list)
for row in table.find_all("tr"):
row = [td.get_text().strip() for td in row.find_all("td")]
if row:
8 changes: 4 additions & 4 deletions pymatgen/analysis/bond_valence.py
@@ -2,10 +2,10 @@

from __future__ import annotations

-import collections
import functools
import operator
import os
+from collections import defaultdict
from math import exp, sqrt
from typing import TYPE_CHECKING

@@ -279,7 +279,7 @@ def get_valences(self, structure: Structure):
self._best_vset = None

def evaluate_assignment(v_set):
-el_oxi = collections.defaultdict(list)
+el_oxi = defaultdict(list)
for idx, sites in enumerate(equi_sites):
el_oxi[sites[0].specie.symbol].append(v_set[idx])
max_diff = max(max(v) - min(v) for v in el_oxi.values())
@@ -346,7 +346,7 @@ def _recurse(assigned=None):
self._best_vset = None

def evaluate_assignment(v_set):
-el_oxi = collections.defaultdict(list)
+el_oxi = defaultdict(list)
jj = 0
for sites in equi_sites:
for specie, _ in get_z_ordered_elmap(sites[0].species):
@@ -474,7 +474,7 @@ def add_oxidation_state_by_site_fraction(structure, oxidation_states):
"""
try:
for idx, site in enumerate(structure):
-new_sp = collections.defaultdict(float)
+new_sp = defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site.species)):
specie = Species(el.symbol, oxidation_states[idx][j])
new_sp[specie] += occu
10 changes: 5 additions & 5 deletions pymatgen/analysis/diffraction/core.py
@@ -3,7 +3,7 @@
from __future__ import annotations

import abc
-import collections
+from collections import defaultdict
from typing import TYPE_CHECKING

import matplotlib.pyplot as plt
@@ -215,9 +215,9 @@ def get_unique_families(hkls):
def is_perm(hkl1, hkl2) -> bool:
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
-return all(i == j for i, j in zip(sorted(h1), sorted(h2)))
+return np.all(np.sort(h1) == np.sort(h2))

-unique = collections.defaultdict(list)
+unique = defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2, v2 in unique.items():
@@ -229,7 +229,7 @@ def is_perm(hkl1, hkl2) -> bool:
unique[hkl1].append(hkl1)

pretty_unique = {}
-for v in unique.values():
-pretty_unique[sorted(v)[-1]] = len(v)
+for val in unique.values():
+pretty_unique[max(val)] = len(val)

return pretty_unique
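Worth noting on the `get_unique_families` hunk above: neither edit changes behavior. Python tuples compare lexicographically, so `max(val)` returns the same hkl as `sorted(val)[-1]`, and sorting both index sets before an element-wise comparison is the same permutation test whether written as a generator or vectorized with NumPy. A small self-contained sketch with a made-up hkl family:

```python
import numpy as np

# Hypothetical family of symmetry-equivalent Miller indices (illustration only).
family = [(0, 1, 1), (1, 1, 0), (1, 0, 1)]

# FURB192 rewrite: max() picks the lexicographically largest tuple, same as sorted()[-1].
assert max(family) == sorted(family)[-1] == (1, 1, 0)

# is_perm rewrite: both forms check that two hkls are permutations of each other.
h1, h2 = np.abs((1, -1, 0)), np.abs((0, 1, 1))
old_style = all(i == j for i, j in zip(sorted(h1), sorted(h2)))
new_style = bool(np.all(np.sort(h1) == np.sort(h2)))
assert old_style and new_style
```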
8 changes: 4 additions & 4 deletions pymatgen/analysis/diffraction/neutron.py
@@ -98,8 +98,8 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
)

# Obtain crystallographic reciprocal lattice points within range
-recip_latt = lattice.reciprocal_lattice_crystallographic
-recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
+recip_lattice = lattice.reciprocal_lattice_crystallographic
+recip_pts = recip_lattice.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]

@@ -129,7 +129,7 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
coeffs = np.array(_coeffs)
frac_coords = np.array(_frac_coords)
occus = np.array(_occus)
-dwfactors = np.array(_dwfactors)
+dw_factors = np.array(_dwfactors)
peaks: dict[float, list[float | list[tuple[int, ...]]]] = {}
two_thetas: list[float] = []

@@ -147,7 +147,7 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
s = g_hkl / 2

# Calculate Debye-Waller factor
-dw_correction = np.exp(-dwfactors * (s**2))
+dw_correction = np.exp(-dw_factors * (s**2))

# Vectorized computation of g.r for all fractional coords and
# hkl.
12 changes: 6 additions & 6 deletions pymatgen/analysis/diffraction/xrd.py
@@ -159,8 +159,8 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
)

# Obtain crystallographic reciprocal lattice points within range
-recip_latt = lattice.reciprocal_lattice_crystallographic
-recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
+recip_lattice = lattice.reciprocal_lattice_crystallographic
+recip_pts = recip_lattice.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]

@@ -172,7 +172,7 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
_coeffs = []
_frac_coords = []
_occus = []
-_dwfactors = []
+_dw_factors = []

for site in structure:
for sp, occu in site.species.items():
@@ -184,15 +184,15 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
f"Unable to calculate XRD pattern as there is no scattering coefficients for {sp.symbol}."
)
_coeffs.append(c)
-_dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
+_dw_factors.append(self.debye_waller_factors.get(sp.symbol, 0))
_frac_coords.append(site.frac_coords)
_occus.append(occu)

zs = np.array(_zs)
coeffs = np.array(_coeffs)
frac_coords = np.array(_frac_coords)
occus = np.array(_occus)
-dwfactors = np.array(_dwfactors)
+dw_factors = np.array(_dw_factors)
peaks: dict[float, list[float | list[tuple[int, ...]]]] = {}
two_thetas: list[float] = []

@@ -227,7 +227,7 @@ def get_pattern(self, structure: Structure, scaled=True, two_theta_range=(0, 90)
axis=1, # type: ignore
)

-dw_correction = np.exp(-dwfactors * s2)
+dw_correction = np.exp(-dw_factors * s2)

# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
6 changes: 3 additions & 3 deletions pymatgen/analysis/dimensionality.py
@@ -347,7 +347,7 @@ def get_dimensionality_cheon(
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
-dim = str(int(dim)) + "D"
+dim = f"{int(dim)}D"
else:
return None
else:
@@ -361,7 +361,7 @@ def get_dimensionality_cheon(
else:
dim = np.log2(float(max2) / max1)
if dim == int(dim):
-dim = str(int(dim)) + "D"
+dim = f"{int(dim)}D"
else:
structure = copy.copy(structure_save)
structure.make_supercell(np.eye(3) * 3)
@@ -372,7 +372,7 @@ def get_dimensionality_cheon(
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
-dim = str(int(dim)) + "D"
+dim = f"{int(dim)}D"
else:
return None
return dim
6 changes: 3 additions & 3 deletions pymatgen/analysis/disorder.py
@@ -2,8 +2,8 @@

from __future__ import annotations

-import collections
import itertools
+from collections import defaultdict
from typing import TYPE_CHECKING

if TYPE_CHECKING:
@@ -24,8 +24,8 @@ def get_warren_cowley_parameters(structure: Structure, r: float, dr: float) -> d
"""
comp = structure.composition

-n_ij = collections.defaultdict(int) # type: ignore
-n_neighbors = collections.defaultdict(int) # type: ignore
+n_ij = defaultdict(int) # type: ignore
+n_neighbors = defaultdict(int) # type: ignore
for site in structure:
for nn in structure.get_neighbors_in_shell(site.coords, r, dr):
n_ij[(site.specie, nn.specie)] += 1
2 changes: 1 addition & 1 deletion pymatgen/analysis/nmr.py
@@ -165,7 +165,7 @@ def principal_axis_system(self):
def V_xx(self):
"""Returns: First diagonal element."""
diags = np.diag(self.principal_axis_system)
-return sorted(diags, key=np.abs)[0]
+return min(diags, key=np.abs)

@property
def V_yy(self):
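The `V_xx` change above is the same FURB192 pattern with a key function: `min(diags, key=np.abs)` returns the diagonal element of smallest magnitude, sign included, exactly as `sorted(diags, key=np.abs)[0]` did. A tiny sketch with invented numbers:

```python
import numpy as np

# Hypothetical diagonal of a principal-axis-system tensor (not real data).
diags = np.array([-0.4, 1.2, -2.0])

# Both expressions select the entry closest to zero while preserving its sign.
assert min(diags, key=np.abs) == sorted(diags, key=np.abs)[0] == -0.4
```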
6 changes: 3 additions & 3 deletions pymatgen/analysis/phase_diagram.py
@@ -2,14 +2,14 @@

from __future__ import annotations

-import collections
import itertools
import json
import logging
import math
import os
import re
import warnings
+from collections import defaultdict
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Literal, no_type_check

@@ -1181,7 +1181,7 @@ def get_chempot_range_map(
else:
el_energies = dict.fromkeys(elements, 0)

-chempot_ranges = collections.defaultdict(list)
+chempot_ranges = defaultdict(list)
vertices = [list(range(len(self.elements)))]

if len(all_chempots) > len(self.elements):
@@ -2631,7 +2631,7 @@ def _create_plotly_figure_layout(self, label_stable=True):
if hasattr(el_ref, "original_entry"): # for grand potential PDs, etc.
clean_formula = htmlify(el_ref.original_entry.reduced_formula)

-layout["ternary"][axis + "axis"]["title"] = {
+layout["ternary"][f"{axis}axis"]["title"] = {
"text": clean_formula,
"font": {"size": 24},
}
6 changes: 3 additions & 3 deletions pymatgen/analysis/structure_analyzer.py
@@ -2,8 +2,8 @@

from __future__ import annotations

-import collections
import itertools
+from collections import defaultdict
from math import acos, pi
from typing import TYPE_CHECKING
from warnings import warn
@@ -227,7 +227,7 @@ def get_percentage_bond_dist_changes(self, max_radius: float = 3.0) -> dict[int,
between site1 and site_n is the same as bonding between site_n and site1, there is no
reason to duplicate the information or computation.
"""
-data: dict[int, dict[int, float]] = collections.defaultdict(dict)
+data: dict[int, dict[int, float]] = defaultdict(dict)
for indices in itertools.combinations(list(range(len(self.initial))), 2):
ii, jj = sorted(indices)
initial_dist = self.initial[ii].distance(self.initial[jj])
@@ -442,7 +442,7 @@ def parse_oxide(self) -> tuple[str, int]:
if isinstance(structure.elements[0], Element):
comp = structure.composition
elif isinstance(structure.elements[0], Species):
-elem_map: dict[Element, float] = collections.defaultdict(float)
+elem_map: dict[Element, float] = defaultdict(float)
for site in structure:
for species, occu in site.species.items():
elem_map[species.element] += occu
2 changes: 1 addition & 1 deletion pymatgen/analysis/structure_matcher.py
@@ -979,7 +979,7 @@ def get_rms_anonymous(self, struct1, struct2):

matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, use_rms=True, break_on_match=False)
if matches:
-best = sorted(matches, key=lambda x: x[1][0])[0]
+best = min(matches, key=lambda x: x[1][0])
return best[1][0], best[0]

return None, None
10 changes: 5 additions & 5 deletions pymatgen/analysis/topological/spillage.py
@@ -203,9 +203,9 @@ def overlap_so_spinpol(self):
"!!!!!!!!!!",
)

-gmax = max(np.real(gamma_k))
-nkmax = np.argmax(np.real(gamma_k))
-kmax = kpoints[nkmax]
+gamma_max = max(np.real(gamma_k))
+n_kmax = np.argmax(np.real(gamma_k))
+k_max = kpoints[n_kmax]

print("------------------------------------")
print("\n INDIRECT DIRECT HOMO/LUMO (eV)")
@@ -223,5 +223,5 @@ def overlap_so_spinpol(self):
" ",
[so_homo, so_lumo],
)
-print("gamma max", np.real(gmax), " at k = ", kmax)
-return gmax
+print("gamma max", np.real(gamma_max), " at k = ", k_max)
+return gamma_max
4 changes: 2 additions & 2 deletions pymatgen/analysis/xps.py
@@ -18,8 +18,8 @@

from __future__ import annotations

-import collections
import warnings
+from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING

@@ -49,7 +49,7 @@
def _load_cross_sections(fname):
data = pd.read_csv(fname)

-dct = collections.defaultdict(dict)
+dct = defaultdict(dict)
for row in data.itertuples():
sym = row.element
el = Element(sym)
10 changes: 5 additions & 5 deletions pymatgen/apps/borg/hive.py
@@ -124,8 +124,8 @@ def assimilate(self, path):
filepath = vasprun_files[0]
elif len(vasprun_files) > 1:
# Since multiple files are ambiguous, we will always read
-# the one that it the last one alphabetically.
-filepath = sorted(vasprun_files)[-1]
+# the last one alphabetically.
+filepath = max(vasprun_files)
warnings.warn(f"{len(vasprun_files)} vasprun.xml.* found. {filepath} is being parsed.")

try:
@@ -220,14 +220,14 @@ def assimilate(self, path):
filenames = {"INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"}
if "relax1" in files and "relax2" in files:
for filename in ("INCAR", "POTCAR", "POSCAR"):
-search_str = f"{path}/relax1", filename + "*"
+search_str = f"{path}/relax1", f"{filename}*"
files_to_parse[filename] = glob(search_str)[0]
for filename in ("CONTCAR", "OSZICAR"):
-search_str = f"{path}/relax2", filename + "*"
+search_str = f"{path}/relax2", f"{filename}*"
files_to_parse[filename] = glob(search_str)[-1]
else:
for filename in filenames:
-files = sorted(glob(os.path.join(path, filename + "*")))
+files = sorted(glob(os.path.join(path, f"{filename}*")))
if len(files) == 1 or filename in ("INCAR", "POTCAR") or (len(files) == 1 and filename == "DYNMAT"):
files_to_parse[filename] = files[0]
elif len(files) > 1:
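One behavioral note on the hive.py hunk above: `max()` on a list of filename strings compares lexicographically, so it returns the alphabetically last `vasprun.xml.*` exactly as `sorted(...)[-1]` did, quirks included (for example, a hypothetical `vasprun.xml.10` would still lose to `vasprun.xml.9`). A minimal sketch with invented filenames:

```python
# Invented filenames, only to show that the FURB192 fix preserves the old selection.
vasprun_files = ["vasprun.xml.relax1", "vasprun.xml.relax2"]

assert max(vasprun_files) == sorted(vasprun_files)[-1] == "vasprun.xml.relax2"
```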