diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 4683e81653e..1134dc92a41 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -34,6 +34,10 @@ jobs:
             python: "3.10"
             resolution: highest
             extras: ci,optional
+          - os: windows-latest
+            python: "3.10"
+            resolution: highest
+            extras: ci,optional,numpy1  # Test NP1 on Windows (quite buggy ATM)
           - os: ubuntu-latest
             python: ">3.10"
             resolution: lowest-direct
diff --git a/pyproject.toml b/pyproject.toml
index bd11fd59895..8d302352ea0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,8 +59,6 @@ dependencies = [
     "matplotlib>=3.8",
     "monty>=2024.7.29",
     "networkx>=2.2",
-    # NumPy documentation suggests pinning the current major version as the C API is used
-    # https://numpy.org/devdocs/dev/depending_on_numpy.html#runtime-dependency-version-ranges
     "palettable>=3.3.3",
     "pandas>=2",
     "plotly>=4.5.0",
@@ -73,7 +71,9 @@ dependencies = [
     "tabulate>=0.9",
     "tqdm>=4.60",
     "uncertainties>=3.1.4",
-    'numpy>=1.25.0,<3.0',
+    # NumPy documentation suggests pinning the current major version as the C API is used
+    # https://numpy.org/devdocs/dev/depending_on_numpy.html#runtime-dependency-version-ranges
+    "numpy>=1.25.0,<3",
 ]
 
 version = "2024.8.9"
@@ -115,6 +115,7 @@ optional = [
     # "openbabel>=3.1.1; platform_system=='Linux'",
 ]
 numba = ["numba>=0.55"]
+numpy1 = ["numpy>=1.25.0,<2"]  # Test NP1 on Windows (quite buggy ATM)
 
 [project.scripts]
 pmg = "pymatgen.cli.pmg:main"
@@ -266,9 +267,7 @@ exclude_also = [
     "@np.deprecate",
     "def __repr__",
     "except ImportError:",
-    "if 0:",
     "if TYPE_CHECKING:",
-    "if __name__ == .__main__.:",
     "if self.debug:",
     "if settings.DEBUG",
     "if typing.TYPE_CHECKING:",
diff --git a/src/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py b/src/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py
index 4bfcf2f266a..1666a0cf3ef 100644
--- a/src/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py
+++ b/src/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py
@@ -1728,7 +1728,7 @@ def coordination_geometry_symmetry_measures_separation_plane_optim(
                     continue
                 if sep not in nb_set.separations:
                     nb_set.separations[sep] = {}
-                _sep = [np.array(ss, dtype=int) for ss in separation]
+                _sep = [np.array(ss, dtype=np.int64) for ss in separation]
                 nb_set.separations[sep][separation] = (plane, _sep)
                 if sep == separation_plane_algo.separation:
                     new_seps.append(_sep)
diff --git a/src/pymatgen/analysis/chemenv/utils/graph_utils.py b/src/pymatgen/analysis/chemenv/utils/graph_utils.py
index 18103826bb1..5d88ab3cd12 100644
--- a/src/pymatgen/analysis/chemenv/utils/graph_utils.py
+++ b/src/pymatgen/analysis/chemenv/utils/graph_utils.py
@@ -25,9 +25,9 @@ def get_delta(node1, node2, edge_data):
         edge_data:
     """
     if node1.isite == edge_data["start"] and node2.isite == edge_data["end"]:
-        return np.array(edge_data["delta"], dtype=int)
+        return np.array(edge_data["delta"], dtype=np.int64)
     if node2.isite == edge_data["start"] and node1.isite == edge_data["end"]:
-        return -np.array(edge_data["delta"], dtype=int)
+        return -np.array(edge_data["delta"], dtype=np.int64)
     raise ValueError("Trying to find a delta between two nodes with an edge that seems not to link these nodes.")
diff --git a/src/pymatgen/analysis/local_env.py b/src/pymatgen/analysis/local_env.py
index 369b2e923fe..4a985898192 100644
--- a/src/pymatgen/analysis/local_env.py
+++ b/src/pymatgen/analysis/local_env.py
@@ -822,7 +822,7 @@ def get_all_voronoi_polyhedra(self, structure: Structure):
             indices.extend([(x[2],) + x[3] for x in neighs])
 
         # Get the non-duplicates (using the site indices for numerical stability)
-        indices = np.array(indices, dtype=int)
+        indices = np.array(indices, dtype=np.int64)
         indices, uniq_inds = np.unique(indices, return_index=True, axis=0)  # type: ignore[assignment]
         sites = [sites[idx] for idx in uniq_inds]
diff --git a/src/pymatgen/analysis/molecule_matcher.py b/src/pymatgen/analysis/molecule_matcher.py
index 152de7be790..43c13150734 100644
--- a/src/pymatgen/analysis/molecule_matcher.py
+++ b/src/pymatgen/analysis/molecule_matcher.py
@@ -1042,7 +1042,7 @@ def permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights)
         p_centroid_test = np.dot(p_centroid, U)
 
         # generate full view from q shape to fill in atom view on the fly
-        perm_inds = np.zeros(len(p_atoms), dtype=int)
+        perm_inds = np.zeros(len(p_atoms), dtype=np.int64)
 
         # Find unique atoms
         species = np.unique(p_atoms)
@@ -1067,7 +1067,7 @@ def permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights)
         p_centroid_test = np.dot(p_centroid, U)
 
         # generate full view from q shape to fill in atom view on the fly
-        perm_inds = np.zeros(len(p_atoms), dtype=int)
+        perm_inds = np.zeros(len(p_atoms), dtype=np.int64)
 
         # Find unique atoms
         species = np.unique(p_atoms)
diff --git a/src/pymatgen/analysis/structure_matcher.py b/src/pymatgen/analysis/structure_matcher.py
index ca4ed5b949a..ee1a73fcbae 100644
--- a/src/pymatgen/analysis/structure_matcher.py
+++ b/src/pymatgen/analysis/structure_matcher.py
@@ -559,7 +559,7 @@ def _get_mask(self, struct1, struct2, fu, s1_supercell):
         if s1_supercell:
             # remove the symmetrically equivalent s1 indices
             inds = inds[::fu]
-        return np.array(mask, dtype=int), inds, idx
+        return np.array(mask, dtype=np.int64), inds, idx
 
     def fit(
         self, struct1: Structure, struct2: Structure, symmetric: bool = False, skip_structure_reduction: bool = False
diff --git a/src/pymatgen/analysis/topological/spillage.py b/src/pymatgen/analysis/topological/spillage.py
index d0da47ccfe1..a29b5446c9c 100644
--- a/src/pymatgen/analysis/topological/spillage.py
+++ b/src/pymatgen/analysis/topological/spillage.py
@@ -42,7 +42,7 @@ def orth(A):
         n_rows, n_cols = A.shape
         eps = np.finfo(float).eps
         tol = max(n_rows, n_cols) * np.amax(s) * eps
-        num = np.sum(s > tol, dtype=int)
+        num = np.sum(s > tol, dtype=np.int64)
         orthonormal_basis = u[:, :num]
         return orthonormal_basis, num
diff --git a/src/pymatgen/core/interface.py b/src/pymatgen/core/interface.py
index 3a2c0122723..107241385d8 100644
--- a/src/pymatgen/core/interface.py
+++ b/src/pymatgen/core/interface.py
@@ -1269,7 +1269,7 @@ def get_trans_mat(
             r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
         # Set one vector of the basis to the rotation axis direction, and
         # obtain the corresponding transform matrix
-        eye = np.eye(3, dtype=int)
+        eye = np.eye(3, dtype=np.int64)
         hh = kk = ll = None
         for hh in range(3):
             if abs(r_axis[hh]) != 0:
@@ -2107,7 +2107,7 @@ def slab_from_csl(
     # Quickly generate a supercell, normal is not working in this way
     if quick_gen:
         scale_factor = []
-        eye = np.eye(3, dtype=int)
+        eye = np.eye(3, dtype=np.int64)
         for i, j in enumerate(miller):
             if j == 0:
                 scale_factor.append(eye[i])
diff --git a/src/pymatgen/core/lattice.py b/src/pymatgen/core/lattice.py
index b9b92554686..9ce07f898ea 100644
--- a/src/pymatgen/core/lattice.py
+++ b/src/pymatgen/core/lattice.py
@@ -961,7 +961,7 @@ def get_angles(v1, v2, l1, l2):
         for idx, all_j in enumerate(gamma_b):
             inds = np.logical_and(all_j[:, None], np.logical_and(alpha_b, beta_b[idx][None, :]))
             for j, k in np.argwhere(inds):
-                scale_m = np.array((f_a[idx], f_b[j], f_c[k]), dtype=int)  # type: ignore[index]
+                scale_m = np.array((f_a[idx], f_b[j], f_c[k]), dtype=np.int64)  # type: ignore[index]
                 if abs(np.linalg.det(scale_m)) < 1e-8:
                     continue
@@ -1381,7 +1381,7 @@ def get_points_in_sphere(
         frac_points = np.ascontiguousarray(frac_points, dtype=float)
         latt_matrix = np.ascontiguousarray(self.matrix, dtype=float)
         cart_coords = np.ascontiguousarray(self.get_cartesian_coords(frac_points), dtype=float)
-        pbc = np.ascontiguousarray(self.pbc, dtype=int)
+        pbc = np.ascontiguousarray(self.pbc, dtype=np.int64)
         center_coords = np.ascontiguousarray([center], dtype=float)
 
         _, indices, images, distances = find_points_in_spheres(
@@ -1510,12 +1510,12 @@ def get_points_in_sphere_old(
         # Generate all possible images that could be within `r` of `center`
         mins = np.floor(pcoords - nmax)
         maxes = np.ceil(pcoords + nmax)
-        arange = np.arange(start=mins[0], stop=maxes[0], dtype=int)
-        brange = np.arange(start=mins[1], stop=maxes[1], dtype=int)
-        crange = np.arange(start=mins[2], stop=maxes[2], dtype=int)
-        arange = arange[:, None] * np.array([1, 0, 0], dtype=int)[None, :]
-        brange = brange[:, None] * np.array([0, 1, 0], dtype=int)[None, :]
-        crange = crange[:, None] * np.array([0, 0, 1], dtype=int)[None, :]
+        arange = np.arange(start=mins[0], stop=maxes[0], dtype=np.int64)
+        brange = np.arange(start=mins[1], stop=maxes[1], dtype=np.int64)
+        crange = np.arange(start=mins[2], stop=maxes[2], dtype=np.int64)
+        arange = arange[:, None] * np.array([1, 0, 0], dtype=np.int64)[None, :]
+        brange = brange[:, None] * np.array([0, 1, 0], dtype=np.int64)[None, :]
+        crange = crange[:, None] * np.array([0, 0, 1], dtype=np.int64)[None, :]
         images = arange[:, None, None] + brange[None, :, None] + crange[None, None, :]
 
         # Generate the coordinates of all atoms within these images
@@ -1629,7 +1629,7 @@ def get_distance_and_image(
         if jimage is None:
             v, d2 = pbc_shortest_vectors(self, frac_coords1, frac_coords2, return_d2=True)
             fc = self.get_fractional_coords(v[0][0]) + frac_coords1 - frac_coords2
-            fc = np.array(np.round(fc), dtype=int)
+            fc = np.array(np.round(fc), dtype=np.int64)
             return np.sqrt(d2[0, 0]), fc
 
         jimage = np.array(jimage)
@@ -1866,7 +1866,7 @@ def get_points_in_spheres(
     neighbors: list[list[tuple[np.ndarray, float, int, np.ndarray]]] = []
 
     for ii, jj in zip(center_coords, site_neighbors, strict=True):
-        l1 = np.array(_three_to_one(jj, ny, nz), dtype=int).ravel()
+        l1 = np.array(_three_to_one(jj, ny, nz), dtype=np.int64).ravel()
         # Use the cube index map to find the all the neighboring
         # coords, images, and indices
         ks = [k for k in l1 if k in cube_to_coords]
@@ -1905,7 +1905,7 @@ def _compute_cube_index(
     Returns:
         np.ndarray: nx3 array int indices
     """
-    return np.array(np.floor((coords - global_min) / radius), dtype=int)
+    return np.array(np.floor((coords - global_min) / radius), dtype=np.int64)
 
 
 def _one_to_three(label1d: np.ndarray, ny: int, nz: int) -> np.ndarray:
@@ -1943,7 +1943,7 @@ def find_neighbors(label: np.ndarray, nx: int, ny: int, nz: int) -> list[np.ndar
         Neighbor cell indices.
     """
     array = [[-1, 0, 1]] * 3
-    neighbor_vectors = np.array(list(itertools.product(*array)), dtype=int)
+    neighbor_vectors = np.array(list(itertools.product(*array)), dtype=np.int64)
     label3d = _one_to_three(label, ny, nz) if np.shape(label)[1] == 1 else label
     all_labels = label3d[:, None, :] - neighbor_vectors[None, :, :]
     filtered_labels = []
diff --git a/src/pymatgen/core/structure.py b/src/pymatgen/core/structure.py
index 588fc1d559a..bf8803c15ff 100644
--- a/src/pymatgen/core/structure.py
+++ b/src/pymatgen/core/structure.py
@@ -1778,7 +1778,7 @@ def get_neighbor_list(
         site_coords = np.ascontiguousarray([site.coords for site in sites], dtype=float)
         cart_coords = np.ascontiguousarray(self.cart_coords, dtype=float)
         lattice_matrix = np.ascontiguousarray(self.lattice.matrix, dtype=float)
-        pbc = np.ascontiguousarray(self.pbc, dtype=int)
+        pbc = np.ascontiguousarray(self.pbc, dtype=np.int64)
         center_indices, points_indices, images, distances = find_points_in_spheres(
             cart_coords,
             site_coords,
diff --git a/src/pymatgen/core/surface.py b/src/pymatgen/core/surface.py
index 7f43698501d..db3e11e1a57 100644
--- a/src/pymatgen/core/surface.py
+++ b/src/pymatgen/core/surface.py
@@ -949,9 +949,9 @@ def add_site_types() -> None:
             or "bulk_equivalent" not in initial_structure.site_properties
         ):
             spg_analyzer = SpacegroupAnalyzer(initial_structure)
-            initial_structure.add_site_property("bulk_wyckoff", spg_analyzer.get_symmetry_dataset()["wyckoffs"])
+            initial_structure.add_site_property("bulk_wyckoff", spg_analyzer.get_symmetry_dataset().wyckoffs)
             initial_structure.add_site_property(
-                "bulk_equivalent", spg_analyzer.get_symmetry_dataset()["equivalent_atoms"].tolist()
+                "bulk_equivalent", spg_analyzer.get_symmetry_dataset().equivalent_atoms.tolist()
             )
 
     def calculate_surface_normal() -> np.ndarray:
@@ -971,7 +971,7 @@ def calculate_scaling_factor() -> np.ndarray:
         """
         slab_scale_factor = []
        non_orth_ind = []
-        eye = np.eye(3, dtype=int)
+        eye = np.eye(3, dtype=np.int64)
         for idx, miller_idx in enumerate(miller_index):
             if miller_idx == 0:
                 # If lattice vector is perpendicular to surface normal, i.e.,
diff --git a/src/pymatgen/electronic_structure/boltztrap.py b/src/pymatgen/electronic_structure/boltztrap.py
index 3acc8fb7846..5e23f9888a6 100644
--- a/src/pymatgen/electronic_structure/boltztrap.py
+++ b/src/pymatgen/electronic_structure/boltztrap.py
@@ -325,7 +325,7 @@ def write_struct(self, output_file) -> None:
                 + "\n"
             )
 
-            ops = [np.eye(3)] if self._symprec is None else sym.get_symmetry_dataset()["rotations"]  # type: ignore[reportPossiblyUnboundVariable]
+            ops = [np.eye(3)] if self._symprec is None else sym.get_symmetry_dataset().rotations  # type: ignore[reportPossiblyUnboundVariable]
 
            file.write(f"{len(ops)}\n")
diff --git a/src/pymatgen/io/abinit/abiobjects.py b/src/pymatgen/io/abinit/abiobjects.py
index 76d99e289fa..33a8e563803 100644
--- a/src/pymatgen/io/abinit/abiobjects.py
+++ b/src/pymatgen/io/abinit/abiobjects.py
@@ -150,7 +150,7 @@ def structure_from_abivars(cls=None, *args, **kwargs) -> Structure:
         raise ValueError(f"{len(typat)=} must equal {len(coords)=}")
 
     # Note conversion to int and Fortran --> C indexing
-    typat = np.array(typat, dtype=int)
+    typat = np.array(typat, dtype=np.int64)
     species = [znucl_type[typ - 1] for typ in typat]
 
     return cls(
diff --git a/src/pymatgen/io/lmto.py b/src/pymatgen/io/lmto.py
index a618d1b666d..2dac7a823d6 100644
--- a/src/pymatgen/io/lmto.py
+++ b/src/pymatgen/io/lmto.py
@@ -108,7 +108,7 @@ def as_dict(self):
         # The following is to find the classes (atoms that are not symmetry equivalent,
         # and create labels. Note that LMTO only attaches numbers with the second atom
         # of the same species, e.g. "Bi", "Bi1", "Bi2", etc.
-        eq_atoms = sga.get_symmetry_dataset()["equivalent_atoms"]
+        eq_atoms = sga.get_symmetry_dataset().equivalent_atoms
         ineq_sites_index = list(set(eq_atoms))
         sites = []
         classes = []
diff --git a/src/pymatgen/optimization/linear_assignment.pyx b/src/pymatgen/optimization/linear_assignment.pyx
index 6f23c318ea0..d94a188e2d1 100644
--- a/src/pymatgen/optimization/linear_assignment.pyx
+++ b/src/pymatgen/optimization/linear_assignment.pyx
@@ -1,10 +1,8 @@
 # cython: language_level=3
 """
-This module contains an algorithm to solve the Linear Assignment Problem
+This module contains the LAPJV algorithm to solve the Linear Assignment Problem.
 """
 
-# isort: dont-add-imports
-
 import numpy as np
 
 cimport cython
@@ -38,26 +36,21 @@ class LinearAssignment:
             rectangular
         epsilon: Tolerance for determining if solution vector is < 0
 
-    .. attribute: min_cost:
-
-        The minimum cost of the matching
-
-    .. attribute: solution:
-
-        The matching of the rows to columns. i.e solution = [1, 2, 0]
-        would match row 0 to column 1, row 1 to column 2 and row 2
-        to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0]
+    Attributes:
+        min_cost: The minimum cost of the matching.
+        solution: The matching of the rows to columns. i.e solution = [1, 2, 0]
+            would match row 0 to column 1, row 1 to column 2 and row 2
+            to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0].
     """
 
-    def __init__(self, costs, epsilon=1e-13):
-        # https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword
+    def __init__(self, costs: np.ndarray, epsilon: float=1e-13) -> None:
         self.orig_c = np.asarray(costs, dtype=np.float64, order="C")
         self.nx, self.ny = self.orig_c.shape
         self.n = self.ny
         self.epsilon = fabs(epsilon)
 
-        # check that cost matrix is square
+        # Check that cost matrix is square
         if self.nx > self.ny:
             raise ValueError("cost matrix must have at least as many columns as rows")
@@ -67,9 +60,9 @@ class LinearAssignment:
             self.c = np.zeros((self.n, self.n), dtype=np.float64)
             self.c[:self.nx] = self.orig_c
 
-        # initialize solution vectors
-        self._x = np.empty(self.n, dtype=int)
-        self._y = np.empty(self.n, dtype=int)
+        # Initialize solution vectors
+        self._x = np.empty(self.n, dtype=np.int64)
+        self._y = np.empty(self.n, dtype=np.int64)
 
         self.min_cost = compute(self.n, self.c, self._x, self._y, self.epsilon)
         self.solution = self._x[:self.nx]
@@ -79,7 +72,7 @@ class LinearAssignment:
 @cython.wraparound(False)
 cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_t[:] y, np.float_t eps) nogil:
 
-    # augment
+    # Augment
     cdef int i, j, k, i1, j1, f, f0, cnt, low, up, z, last, nrr
     cdef int n = size
     cdef bint b
@@ -93,7 +86,7 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
     for i in range(n):
         x[i] = -1
 
-    # column reduction
+    # Column reduction
    for j from n > j >= 0:
         col[j] = j
         h = c[0, j]
@@ -107,12 +100,12 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
             x[i1] = j
             y[j] = i1
         else:
-            # in the paper its x[i], but likely a typo
+            # NOTE: in the paper it's x[i], but likely a typo
             if x[i1] > -1:
                 x[i1] = -2 - x[i1]
             y[j] = -1
 
-    # reduction transfer
+    # Reduction transfer
     f = -1
     for i in range(n):
         if x[i] == -1:
@@ -129,12 +122,12 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
                     m = c[i, j] - v[j]
         v[j1] = v[j1] - m
 
-    # augmenting row reduction
+    # Augmenting row reduction
     for cnt in range(2):
         k = 0
         f0 = f
         f = -1
-        # this step isn't strictly necessary, and
+        # This step isn't strictly necessary, and
         # time is proportional to 1/eps in the worst case,
         # so break early by keeping track of nrr
         nrr = 0
@@ -172,7 +165,7 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
                 x[i] = j1
                 y[j1] = i
 
-    # augmentation
+    # Augmentation
     f0 = f
     for f in range(f0 + 1):
         i1 = fre[f]
@@ -182,7 +175,7 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
             d[j] = c[i1, j] - v[j]
             pred[j] = i1
         while True:
-            # the pascal code ends when a single augmentation is found
+            # The pascal code ends when a single augmentation is found
             # really we need to get back to the for f in range(f0+1) loop
             b = False
             if up == low:
@@ -231,7 +224,7 @@ cdef np.float_t compute(int size, np.float_t[:, :] c, np.int64_t[:] x, np.int64_
                         pred[j] = i
                         if fabs(h - m) < eps:
                             if y[j] == -1:
-                                # augment
+                                # Augment
                                 for k in range(last+1):
                                     j1 = col[k]
                                     v[j1] = v[j1] + d[j1] - m
diff --git a/src/pymatgen/optimization/neighbors.pyx b/src/pymatgen/optimization/neighbors.pyx
index 1ecf733138d..f2e7c5b6822 100644
--- a/src/pymatgen/optimization/neighbors.pyx
+++ b/src/pymatgen/optimization/neighbors.pyx
@@ -7,8 +7,6 @@
 # cython: profile=False
 # distutils: language = c
 
-# isort: dont-add-imports
-
 # Setting cdivision=True gives a small speed improvement, but the code is currently
 # written based on Python division so using cdivision may result in missing neighbors
 # in some off cases. See https://github.com/materialsproject/pymatgen/issues/2226
@@ -22,7 +20,7 @@ from libc.string cimport memset
 
 cdef void *safe_malloc(size_t size) except? NULL:
-    """Raise memory error if malloc fails"""
+    """Raise MemoryError if malloc fails."""
     if size == 0:
         return NULL
     cdef void *ptr = malloc(size)
@@ -32,7 +30,7 @@ cdef void *safe_realloc(void *ptr_orig, size_t size) except? NULL:
-    """Raise memory error if realloc fails"""
+    """Raise MemoryError if realloc fails."""
     if size == 0:
         return NULL
     cdef void *ptr = realloc(ptr_orig, size)
@@ -64,10 +62,12 @@ def find_points_in_spheres(
             directly. If the cutoff is less than this value, the algorithm
             will calculate neighbor list using min_r as cutoff and discard
             those that have larger distances.
+
     Returns:
-        index1 (n, ), index2 (n, ), offset_vectors (n, 3), distances (n, ).
-        index1 of center_coords, and index2 of all_coords that form the neighbor pair
-        offset_vectors are the periodic image offsets for the all_coords.
+        index1 (n, ): Indexes of center_coords.
+        index2 (n, ): Indexes of all_coords that form the neighbor pair.
+        offset_vectors (n, 3): The periodic image offsets for all_coords.
+        distances (n, ).
     """
     if r < min_r:
         findex1, findex2, foffset_vectors, fdistances = find_points_in_spheres(
@@ -80,7 +80,7 @@ def find_points_in_spheres(
     cdef:
         int i, j, k, l, m
         double[3] maxr
-        # valid boundary, that is the minimum in center_coords - r
+        # Valid boundary, that is the minimum in center_coords - r
         double[3] valid_min
         double[3] valid_max
         double ledge
@@ -148,14 +148,14 @@ def find_points_in_spheres(
     for i in range(n_total):
         for j in range(3):
             if pbc[j]:
-                # only wrap atoms when this dimension is PBC
+                # Only wrap atoms when this dimension is PBC
                 all_frac_coords[i, j] = offset_correction[i, j] % 1
                 offset_correction[i, j] = offset_correction[i, j] - all_frac_coords[i, j]
             else:
                 all_frac_coords[i, j] = offset_correction[i, j]
                 offset_correction[i, j] = 0
 
-    # compute the reciprocal lattice in place
+    # Compute the reciprocal lattice in place
     get_reciprocal_lattice(lattice, reciprocal_lattice)
     get_max_r(reciprocal_lattice, maxr, r)
@@ -226,7 +226,7 @@ def find_points_in_spheres(
     else:
         failed_malloc = 0
 
-    # if no valid neighbors were found return empty
+    # If no valid neighbors were found return empty
     if count == 0:
         free(&frac_coords[0, 0])
         free(&all_frac_coords[0, 0])
@@ -244,7 +244,7 @@ def find_points_in_spheres(
         free(offset_final)
         free(distances)
 
-        return (np.array([], dtype=int), np.array([], dtype=int),
+        return (np.array([], dtype=np.int64), np.array([], dtype=np.int64),
                 np.array([[], [], []], dtype=float).T, np.array([], dtype=float))
 
     n_atoms = count
@@ -349,8 +349,8 @@ def find_points_in_spheres(
         failed_malloc = 0
 
     if count == 0:
-        py_index_1 = np.array([], dtype=int)
-        py_index_2 = np.array([], dtype=int)
+        py_index_1 = np.array([], dtype=np.int64)
+        py_index_2 = np.array([], dtype=np.int64)
         py_offsets = np.array([[], [], []], dtype=float).T
         py_distances = np.array([], dtype=float)
     else:
@@ -393,7 +393,7 @@ def find_points_in_spheres(
 
 cdef void get_cube_neighbors(np.int64_t[3] ncube, np.int64_t[:, ::1] neighbor_map):
     """
-    Get {cube_index: cube_neighbor_indices} map
+    Get {cube_index: cube_neighbor_indices} map.
     """
     cdef:
         int i, j, k
@@ -405,7 +405,7 @@ cdef void get_cube_neighbors(np.int64_t[3] ncube, np.int64_t[:, ::1] neighbor_ma
         )
         np.int64_t[::1] cube_indices_1d = safe_malloc(ncubes*sizeof(np.int64_t))
 
-        # creating the memviews of c-arrays once substantially improves speed
+        # Creating the memviews of c-arrays once substantially improves speed
         # but for some reason it makes the runtime scaling with the number of
         # atoms worse
         np.int64_t[1][3] index3_arr
@@ -419,7 +419,7 @@ cdef void get_cube_neighbors(np.int64_t[3] ncube, np.int64_t[:, ::1] neighbor_ma
         np.int64_t *ovectors_p = safe_malloc(ntotal * 3 * sizeof(np.int64_t))
         int n_ovectors = compute_offset_vectors(ovectors_p, n)
 
-    # now resize to the actual size
+    # Resize to the actual size
     ovectors_p = safe_realloc(ovectors_p, n_ovectors * 3 * sizeof(np.int64_t))
     ovectors = ovectors_p
@@ -496,8 +496,8 @@ cdef double distance2(
     np.int64_t index2,
     np.int64_t size
 ) nogil:
-    """Faster way to compute the distance squared by not using slice but providing indices
-    in each matrix
+    """Faster way to compute the distance squared by not using slice
+    but providing indices in each matrix.
     """
     cdef:
         int i
@@ -517,7 +517,7 @@ cdef void get_bounds(
 ) nogil:
     """
     Given the fractional coordinates and the number of repeation needed in each
-    direction, maxr, compute the translational bounds in each dimension
+    direction, maxr, compute the translational bounds in each dimension.
     """
     cdef:
         int i
@@ -542,7 +542,7 @@ cdef void get_frac_coords(
     double[:, ::1] frac_coords
 ) nogil:
     """
-    Compute the fractional coordinates
+    Compute the fractional coordinates.
     """
     matrix_inv(lattice, inv_lattice)
     matmul(cart_coords, inv_lattice, frac_coords)
@@ -553,7 +553,7 @@ cdef void matmul(
     double [:, ::1] out
 ) nogil:
     """
-    Matrix multiplication
+    Matrix multiplication.
     """
     cdef:
         int i, j, k
@@ -567,7 +567,7 @@ cdef void matmul(
 
 cdef void matrix_inv(const double[:, ::1] matrix, double[:, ::1] inv) nogil:
     """
-    Matrix inversion
+    Matrix inversion.
     """
     cdef:
         int i, j
@@ -580,7 +580,7 @@ cdef void matrix_inv(const double[:, ::1] matrix, double[:, ::1] inv) nogil:
 
 cdef double matrix_det(const double[:, ::1] matrix) nogil:
     """
-    Matrix determinant
+    Matrix determinant.
     """
     return (
         matrix[0, 0] * (matrix[1, 1] * matrix[2, 2] - matrix[1, 2] * matrix[2, 1]) +
@@ -594,13 +594,13 @@ cdef void get_max_r(
     double r
 ) nogil:
     """
-    Get maximum repetition in each directions
+    Get maximum repetition in each directions.
     """
     cdef:
         int i
         double recp_len
 
-    for i in range(3):  # is it ever not 3x3 for our cases?
+    for i in range(3):  # TODO: is it ever not 3x3 for our cases?
         recp_len = norm(reciprocal_lattice[i, :])
         maxr[i] = ceil((r + 0.15) * recp_len / (2 * pi))
@@ -609,7 +609,7 @@ cdef void get_reciprocal_lattice(
     double[:, ::1] reciprocal_lattice
 ) nogil:
     """
-    Compute the reciprocal lattice
+    Compute the reciprocal lattice.
     """
     cdef int i
     for i in range(3):
@@ -626,7 +626,7 @@ cdef void recip_component(
     double[::1] out
 ) nogil:
     """
-    Compute the reciprocal lattice vector
+    Compute the reciprocal lattice vector.
    """
     cdef:
         int i
@@ -640,7 +640,7 @@ cdef void recip_component(
 
 cdef double inner(const double[3] x, const double[3] y) nogil:
     """
-    Compute inner product of 3d vectors
+    Compute inner product of 3d vectors.
     """
     cdef:
         double sum = 0
@@ -652,7 +652,7 @@ cdef double inner(const double[3] x, const double[3] y) nogil:
 
 cdef void cross(const double[3] x, const double[3] y, double[3] out) nogil:
     """
-    Cross product of vector x and y, output in out
+    Cross product of vector x and y, output in out.
     """
     out[0] = x[1] * y[2] - x[2] * y[1]
     out[1] = x[2] * y[0] - x[0] * y[2]
@@ -660,7 +660,7 @@ cdef void cross(const double[3] x, const double[3] y, double[3] out) nogil:
 
 cdef double norm(const double[::1] vec) nogil:
     """
-    Vector norm
+    Vector norm.
     """
     cdef:
         int i
@@ -677,7 +677,7 @@ cdef void max_and_min(
     double[3] min_coords
 ) nogil:
     """
-    Compute the min and max of coords
+    Compute the min and max of coords.
     """
     cdef:
         int i, j
@@ -711,7 +711,7 @@ cdef void three_to_one(
     const np.int64_t[:, ::1] label3d, np.int64_t ny, np.int64_t nz, np.int64_t[::1] label1d
 ) nogil:
     """
-    3D vector representation to 1D
+    3D vector representation to 1D.
     """
     cdef:
         int i
diff --git a/src/pymatgen/phonon/bandstructure.py b/src/pymatgen/phonon/bandstructure.py
index f8354fdb743..aea712d0a73 100644
--- a/src/pymatgen/phonon/bandstructure.py
+++ b/src/pymatgen/phonon/bandstructure.py
@@ -598,7 +598,7 @@ def band_reorder(self) -> None:
         eig = self.bands
         n_phonons, n_qpoints = self.bands.shape
 
-        order = np.zeros([n_qpoints, n_phonons], dtype=int)
+        order = np.zeros([n_qpoints, n_phonons], dtype=np.int64)
         order[0] = np.array(range(n_phonons))
 
         # get the atomic masses
diff --git a/src/pymatgen/util/coord.py b/src/pymatgen/util/coord.py
index a24236c5cbe..c466b49c5f2 100644
--- a/src/pymatgen/util/coord.py
+++ b/src/pymatgen/util/coord.py
@@ -267,7 +267,9 @@ def is_coord_subset_pbc(subset, superset, atol: float = 1e-8, mask=None, pbc: Pb
     """
     c1 = np.array(subset, dtype=np.float64)
     c2 = np.array(superset, dtype=np.float64)
-    mask_arr = np.array(mask, dtype=int) if mask is not None else np.zeros((len(subset), len(superset)), dtype=int)
+    mask_arr = (
+        np.array(mask, dtype=np.int64) if mask is not None else np.zeros((len(subset), len(superset)), dtype=np.int64)
+    )
     atol = np.zeros(3, dtype=np.float64) + atol
     return coord_cython.is_coord_subset_pbc(c1, c2, atol, mask_arr, pbc)
diff --git a/src/pymatgen/util/coord_cython.pyx b/src/pymatgen/util/coord_cython.pyx
index f6958b0fbc7..a3a06bb6f6e 100644
--- a/src/pymatgen/util/coord_cython.pyx
+++ b/src/pymatgen/util/coord_cython.pyx
@@ -4,8 +4,6 @@ Utilities for manipulating coordinates or list of coordinates, under periodic
 boundary conditions or otherwise.
 """
 
-# isort: dont-add-imports
-
 __author__ = "Will Richards"
 __copyright__ = "Copyright 2011, The Materials Project"
 __version__ = "1.0"
@@ -22,7 +20,7 @@ from libc.stdlib cimport free, malloc
 
 np.import_array()
 
-#create images, 2d array of all length 3 combinations of [-1,0,1]
+# Create images, 2D array of all length 3 combinations of [-1, 0, 1]
 rng = np.arange(-1, 2, dtype=np.float64)
 arange = rng[:, None] * np.array([1, 0, 0])[None, :]
 brange = rng[:, None] * np.array([0, 1, 0])[None, :]
@@ -87,7 +85,7 @@ def pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask=None, return_d2=False
         first index is fcoords1 index, second is fcoords2 index
     """
 
-    #ensure correct shape
+    # Ensure correct shape
     fcoords1, fcoords2 = np.atleast_2d(fcoords1, fcoords2)
 
     pbc = lattice.pbc
@@ -129,7 +127,7 @@ def pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask=None, return_d2=False
     cdef bint has_mask = mask is not None
     cdef np.int64_t[:, :] mask_arr
     if has_mask:
-        mask_arr = np.asarray(mask, dtype=np.int_, order="C")
+        mask_arr = np.asarray(mask, dtype=np.int64, order="C")
 
     cdef bint has_ftol = (lll_frac_tol is not None)
     cdef np.float_t[:] ftol
@@ -200,10 +198,10 @@ def is_coord_subset_pbc(subset, superset, atol, mask, pbc=(True, True, True)):
     """
     Tests if all fractional coords in subset are contained in superset.
     Allows specification of a mask determining pairs that are not
-    allowed to match to each other
+    allowed to match to each other.
 
     Args:
-        subset, superset: List of fractional coords
+        subset, superset: List of fractional coords.
         pbc: a tuple defining the periodic boundary conditions along the three
             axis of the lattice.
@@ -214,7 +212,7 @@ def is_coord_subset_pbc(subset, superset, atol, mask, pbc=(True, True, True)):
     cdef np.float_t[:, :] fc1 = subset
     cdef np.float_t[:, :] fc2 = superset
     cdef np.float_t[:] t = atol
-    cdef np.int64_t[:, :] m = np.asarray(mask, dtype=np.int_, order="C")
+    cdef np.int64_t[:, :] m = np.asarray(mask, dtype=np.int64, order="C")
 
     cdef int i, j, k, len_fc1, len_fc2
     cdef np.float_t d
@@ -248,17 +246,17 @@ def is_coord_subset_pbc(subset, superset, atol, mask, pbc=(True, True, True)):
 def coord_list_mapping_pbc(subset, superset, atol=1e-8, pbc=(True, True, True)):
     """
     Gives the index mapping from a subset to a superset.
-    Superset cannot contain duplicate matching rows
+    Superset cannot contain duplicate matching rows.
 
     Args:
-        subset, superset: List of frac_coords
+        subset, superset: List of frac_coords.
         pbc: a tuple defining the periodic boundary conditions along the three
             axis of the lattice.
 
     Returns:
         list of indices such that superset[indices] = subset
     """
-    inds = -np.ones(len(subset), dtype=int)
+    inds = -np.ones(len(subset), dtype=np.int64)
 
     subset = np.atleast_2d(subset)
     superset = np.atleast_2d(superset)
@@ -288,7 +286,7 @@ def coord_list_mapping_pbc(subset, superset, atol=1e-8, pbc=(True, True, True)):
                     raise ValueError("Something wrong with the inputs, likely duplicates in superset")
                 c_inds[i] = j
                 ok_outer = True
-                # we don't break here so we can check for duplicates in superset
+                # We don't break here so we can check for duplicates in superset
 
         if not ok_outer:
             break
diff --git a/tests/analysis/chemenv/connectivity/test_connected_components.py b/tests/analysis/chemenv/connectivity/test_connected_components.py
index 2768fc5b815..1df465d33d1 100644
--- a/tests/analysis/chemenv/connectivity/test_connected_components.py
+++ b/tests/analysis/chemenv/connectivity/test_connected_components.py
@@ -375,7 +375,7 @@ def test_periodicity(self):
         assert cc.periodicity == "2D"
         assert_allclose(cc.periodicity_vectors, [[0, 1, 0], [1, 1, 0]])
         assert isinstance(cc.periodicity_vectors, list)
-        assert cc.periodicity_vectors[0].dtype is np.dtype(int)
+        assert cc.periodicity_vectors[0].dtype is np.dtype(np.int64)
 
         # Test a 3d periodicity
         graph = nx.MultiGraph()
@@ -445,7 +445,7 @@ def test_periodicity(self):
         assert cc.periodicity == "3D"
         assert_allclose(cc.periodicity_vectors, [[0, 1, 0], [1, 1, 0], [1, 1, 1]])
         assert isinstance(cc.periodicity_vectors, list)
-        assert cc.periodicity_vectors[0].dtype is np.dtype(int)
+        assert cc.periodicity_vectors[0].dtype is np.dtype(np.int64)
 
     def test_real_systems(self):
         # Initialize geometry and connectivity finders
diff --git a/tests/core/test_lattice.py b/tests/core/test_lattice.py
index a18cdfa9fcd..84aa51f5953 100644
--- a/tests/core/test_lattice.py
+++ b/tests/core/test_lattice.py
@@ -534,7 +534,7 @@ def test_points_in_spheres(self):
             all_coords=np.array(points),
             center_coords=np.array(center_points),
             r=3,
-            pbc=np.array([0, 0, 0], dtype=int),
+            pbc=np.array([0, 0, 0], dtype=np.int64),
             lattice=lattice,
             numerical_tol=1e-8,
         )
@@ -555,7 +555,7 @@ def test_points_in_spheres(self):
             all_coords=np.array(points),
             center_coords=np.array(center_points),
             r=3,
-            pbc=np.array([True, False, False], dtype=int),
+            pbc=np.array([True, False, False], dtype=np.int64),
             lattice=lattice,
         )
         assert len(nns[0]) == 4
diff --git a/tests/core/test_structure.py b/tests/core/test_structure.py
index 29b271ec99d..5a20ec7ceaa 100644
--- a/tests/core/test_structure.py
+++ b/tests/core/test_structure.py
@@ -1688,6 +1688,7 @@ def test_calculate_ase(self):
         assert not hasattr(calculator, "dynamics")
         assert self.cu_structure == struct_copy, "original structure was modified"
 
+    @pytest.mark.skip(reason="chgnet is failing with Numpy 1, see #3992")
     @pytest.mark.skipif(int(np.__version__[0]) >= 2, reason="chgnet is not built against NumPy 2.0")
     def test_relax_chgnet(self):
         pytest.importorskip("chgnet")
@@ -1712,6 +1713,7 @@ def test_relax_chgnet(self):
         assert custom_relaxed.calc.results.get("energy") == approx(-6.0151076, abs=1e-4)
         assert custom_relaxed.volume == approx(40.044794644, abs=1e-4)
 
+    @pytest.mark.skip(reason="chgnet is failing with Numpy 1, see #3992")
     @pytest.mark.skipif(int(np.__version__[0]) >= 2, reason="chgnet is not built against NumPy 2.0")
     def test_calculate_chgnet(self):
         pytest.importorskip("chgnet")
diff --git a/tests/core/test_units.py b/tests/core/test_units.py
index 2abfd5df8f0..70aef85d778 100644
--- a/tests/core/test_units.py
+++ b/tests/core/test_units.py
@@ -213,7 +213,7 @@ def test_time(self):
        """Similar to FloatWithUnitTest.test_time.
         Check whether EnergyArray and FloatWithUnit have same behavior.
         """
-        # here there's a minor difference because we have a ndarray with dtype=int
+        # here there's a minor difference because we have a ndarray with dtype=np.int64
         a = TimeArray(20, "h")
         assert a.to("s") == 3600 * 20
         # Test left and right multiplication.
diff --git a/tests/optimization/test_neighbors.py b/tests/optimization/test_neighbors.py
index a6374c1f929..3542acd20ff 100644
--- a/tests/optimization/test_neighbors.py
+++ b/tests/optimization/test_neighbors.py
@@ -38,7 +38,7 @@ def test_points_in_spheres(self):
             all_coords=np.array(points),
             center_coords=np.array(center_points),
             r=3,
-            pbc=np.array([0, 0, 0], dtype=int),
+            pbc=np.array([0, 0, 0], dtype=np.int64),
             lattice=np.array(lattice.matrix),
             tol=1e-8,
         )
@@ -48,7 +48,7 @@
             all_coords=np.array(points),
             center_coords=np.array(center_points),
             r=3,
-            pbc=np.array([1, 1, 1], dtype=int),
+            pbc=np.array([1, 1, 1], dtype=np.int64),
             lattice=np.array(lattice.matrix),
         )
         assert len(nns[0]) == 12
@@ -57,7 +57,7 @@
            all_coords=np.array(points),
            center_coords=np.array(center_points),
            r=3,
-           pbc=np.array([True, False, False], dtype=int),
+           pbc=np.array([True, False, False], dtype=np.int64),
            lattice=np.array(lattice.matrix),
        )
        assert len(nns[0]) == 4