diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 28fe5c9b2..000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,86 +0,0 @@ -# this build script is adapted from scipy - -build_and_store_wheels: &BUILD_AND_STORE_WHEELS - install_cibuildwheel_script: - - python -m pip install cibuildwheel==2.11.4 - cibuildwheel_script: - - echo "Building wheels for matscipy version $(python discover_version.py)" - - cibuildwheel - - tools/wheels/release-wheels.sh - env: - GITHUB_TOKEN: ENCRYPTED[054da715bf3596559d27a6c74738981a8e878dbbc0dd896cb57a0a758c6a57143fca006376d0eb58d777c25e43ed91cb] - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ENCRYPTED[13d27e71fe29d4d18d53c7b9dffa4472b8abcd094933530310e8dfe4fae4b8b576d1d7c1a7965918e1c4cff5ad4dc326] - wheels_artifacts: - path: "wheelhouse/*" - - -###################################################################### -# Build linux_aarch64 natively -###################################################################### - -# cirrus_wheels_linux_aarch64_task: -# compute_engine_instance: -# image_project: cirrus-images -# image: family/docker-builder-arm64 -# architecture: arm64 -# platform: linux -# cpu: 4 -# memory: 8G -# matrix: -# - env: -# CIBW_BUILD: cp38-* cp39-* -# - env: -# CIBW_BUILD: cp310-* cp311-* -# build_script: | -# apt install -y python3-venv python-is-python3 -# which python -# echo $CIRRUS_CHANGE_MESSAGE -# # needed for discover_version.py -# git fetch --all -# # needed for submodules -# git submodule update --init -# <<: *BUILD_AND_STORE_WHEELS - - -###################################################################### -# Build macosx_arm64 natively -###################################################################### - -cirrus_wheels_macos_arm64_task: - macos_instance: - image: ghcr.io/cirruslabs/macos-monterey-xcode:13.3.1 - matrix: - - env: - CIBW_BUILD: cp38-* - CIBW_BEFORE_ALL: bash tools/wheels/cibw_before_all_cp38_macosx_arm64.sh - - env: - CIBW_BUILD: cp39-* cp310-* cp311-* - env: - PATH: /opt/homebrew/opt/python@3.10/bin:$PATH - CIBW_ENVIRONMENT: MACOSX_DEPLOYMENT_TARGET=12.0 _PYTHON_HOST_PLATFORM="macosx-12.0-arm64" - PKG_CONFIG_PATH: /opt/arm64-builds/lib/pkgconfig - # assumes that the cmake config is in /usr/local/lib/cmake - CMAKE_PREFIX_PATH: /opt/arm64-builds/ - REPAIR_PATH: /usr/local/gfortran/lib:/opt/arm64-builds/lib - CIBW_REPAIR_WHEEL_COMMAND_MACOS: > - DYLD_LIBRARY_PATH=/usr/local/gfortran/lib:/opt/arm64-builds/lib delocate-listdeps {wheel} && - DYLD_LIBRARY_PATH=/usr/local/gfortran/lib:/opt/arm64-builds/lib delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel} - CIBW_TEST_COMMAND: python {project}/tests/test_ffi.py - - install_pre_requirements_script: - - brew install python@3.10 - - ln -s python3 /opt/homebrew/opt/python@3.10/bin/python - - build_script: - - which python - # needed for discover_version.py - - git fetch --all - # needed for submodules - - git submodule update --init - - uname -m - - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" - - clang --version - <<: *BUILD_AND_STORE_WHEELS - - diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 908480235..e11bd7b14 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -25,7 +25,9 @@ jobs: matrix: buildplat: - [ubuntu-20.04, manylinux, x86_64] + - [ubuntu-20.04, manylinux, aarch64] - [macos-12, macosx, x86_64] + - [macos-12, macosx, arm64] - [windows-2019, win, AMD64] python: ["cp38", "cp39", "cp310", "cp311", "cp312"] @@ 
-35,6 +37,11 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[2] == 'x86' }} steps: + - if: matrix.buildplat[2] == 'aarch64' + uses: docker/setup-qemu-action@v3 + with: + platforms: all + - uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/ChangeLog.md b/ChangeLog.md index f1e7c4c19..e64316e7a 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,9 +1,15 @@ Change log ========== -v1.0.0 (24Jan24) +v1.1.0 (not yet released) ------------------------- +- Fixes for recent ASE and numpy 2.0 +- Fix correct number of cores for quad of dissociated dislocations + +v1.0.0 (24Jan24) +---------------- + - JOSS paper! - Significant updates documentation packages - Displacement field and associated deformation tensors for dislocations in anisotropic elastic media diff --git a/c/angle_distribution.cpp b/c/angle_distribution.cpp index be8d30e4a..56e23c93a 100644 --- a/c/angle_distribution.cpp +++ b/c/angle_distribution.cpp @@ -22,16 +22,13 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API #define NO_IMPORT_ARRAY -#define NPY_NO_DEPRECATED_API NPY_1_5_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include -#ifdef _MSC_VER -#define _USE_MATH_DEFINES -#include -#endif - #include "angle_distribution.h" +#define M_PI ((double)3.14159265358979323846) /* pi */ + /* * Compute bond angle distribution */ @@ -47,25 +44,25 @@ py_angle_distribution(PyObject *self, PyObject *args) &j_arr, &PyArray_Type, &r_arr, &nbins, &cutoff)) return NULL; - if (PyArray_NDIM(i_arr) != 1 || PyArray_TYPE(i_arr) != NPY_INT) { + if (PyArray_NDIM((PyArrayObject *) i_arr) != 1 || PyArray_TYPE((PyArrayObject *) i_arr) != NPY_INT) { PyErr_SetString(PyExc_TypeError, "First argument needs to be one-dimensional " "integer array."); return NULL; } - if (PyArray_NDIM(j_arr) != 1 || PyArray_TYPE(j_arr) != NPY_INT) { + if (PyArray_NDIM((PyArrayObject *) j_arr) != 1 || PyArray_TYPE((PyArrayObject *) j_arr) != NPY_INT) { PyErr_SetString(PyExc_TypeError, "Second argument needs to be one-dimensional " "integer array."); return NULL; } - if (PyArray_NDIM(r_arr) != 2 || PyArray_DIM(r_arr, 1) != 3 || - PyArray_TYPE(r_arr) != NPY_DOUBLE) { + if (PyArray_NDIM((PyArrayObject *) r_arr) != 2 || PyArray_DIM((PyArrayObject *) r_arr, 1) != 3 || + PyArray_TYPE((PyArrayObject *) r_arr) != NPY_DOUBLE) { PyErr_SetString(PyExc_TypeError, "Third argument needs to be two-dimensional " "double array."); return NULL; } - npy_intp npairs = PyArray_DIM(i_arr, 0); - if (PyArray_DIM(j_arr, 0) != npairs || PyArray_DIM(r_arr, 0) != npairs) { + npy_intp npairs = PyArray_DIM((PyArrayObject *) i_arr, 0); + if (PyArray_DIM((PyArrayObject *) j_arr, 0) != npairs || PyArray_DIM((PyArrayObject *) r_arr, 0) != npairs) { PyErr_SetString(PyExc_RuntimeError, "First three arguments need to be arrays of " "identical length."); return NULL; @@ -75,11 +72,10 @@ py_angle_distribution(PyObject *self, PyObject *args) PyObject *h_arr = PyArray_ZEROS(1, &dim, NPY_INT, 1); PyObject *tmp_arr = PyArray_ZEROS(1, &dim, NPY_INT, 1); - npy_int *i = (npy_int*)PyArray_DATA(i_arr); - npy_int *j = (npy_int*)PyArray_DATA(j_arr); - double *r = (double*)PyArray_DATA(r_arr); - npy_int *h = (npy_int*)PyArray_DATA(h_arr); - npy_int *tmp = (npy_int*)PyArray_DATA(tmp_arr); + npy_int *i = (npy_int*)PyArray_DATA((PyArrayObject *) i_arr); + double *r = (double*)PyArray_DATA((PyArrayObject *) r_arr); + npy_int *h = (npy_int*)PyArray_DATA((PyArrayObject *) h_arr); + npy_int *tmp = (npy_int*)PyArray_DATA((PyArrayObject *) tmp_arr); npy_int last_i = i[0], i_start = 0; memset(tmp, 0, 
nbins*sizeof(npy_int)); diff --git a/c/islands.cpp b/c/islands.cpp index 0e5de7f05..428099605 100644 --- a/c/islands.cpp +++ b/c/islands.cpp @@ -22,6 +22,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API #define NO_IMPORT_ARRAY +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include #include @@ -107,14 +108,14 @@ py_count_islands(PyObject *self, PyObject *args) if (py_stencil) { py_long_stencil = PyArray_FROMANY(py_stencil, NPY_LONG, 2, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_long_stencil) return NULL; - sx = PyArray_DIM(py_long_stencil, 0); - npy_intp sy = PyArray_DIM(py_long_stencil, 1); + sx = PyArray_DIM((PyArrayObject *) py_long_stencil, 0); + npy_intp sy = PyArray_DIM((PyArrayObject *) py_long_stencil, 1); - stencil = (npy_long*) PyArray_DATA(py_long_stencil); + stencil = (npy_long*) PyArray_DATA((PyArrayObject *) py_long_stencil); if (sy != 2) { PyErr_SetString(PyExc_TypeError, "Stencil must have dimension 2 in the " @@ -126,21 +127,21 @@ py_count_islands(PyObject *self, PyObject *args) stencil = default_stencil; } - py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_map) return NULL; - npy_intp nx = PyArray_DIM(py_bool_map, 0); - npy_intp ny = PyArray_DIM(py_bool_map, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_bool_map, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_bool_map, 1); - npy_bool *map = (npy_bool*) PyArray_DATA(py_bool_map); + npy_bool *map = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_map); npy_intp dims[2] = { nx, ny }; PyObject *py_id = PyArray_ZEROS(2, dims, NPY_INT, 0); if (!py_id) return NULL; - npy_int *id = (npy_int *) PyArray_DATA(py_id); + npy_int *id = (npy_int *) PyArray_DATA((PyArrayObject *) py_id); int i, j, k = 0; npy_int p = 0; @@ -213,21 +214,21 @@ py_count_segments(PyObject *self, PyObject *args) return NULL; PyObject *py_bool_map = NULL; - py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_map) return NULL; - npy_intp nx = PyArray_DIM(py_bool_map, 0); - npy_intp ny = PyArray_DIM(py_bool_map, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_bool_map, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_bool_map, 1); - npy_bool *map = (npy_bool*) PyArray_DATA(py_bool_map); + npy_bool *map = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_map); npy_intp dims[2] = { nx, ny }; PyObject *py_id = PyArray_ZEROS(2, dims, NPY_INT, 0); if (!py_id) return NULL; - npy_int *id = (npy_int *) PyArray_DATA(py_id); + npy_int *id = (npy_int *) PyArray_DATA((PyArrayObject *) py_id); int i, j, k = 0; npy_int p = 0; @@ -263,40 +264,40 @@ py_shortest_distance(PyObject *self, PyObject *args) PyObject *py_bool_fromc = NULL, *py_bool_fromp = NULL; PyObject *py_bool_to = NULL; - py_bool_fromc = PyArray_FROMANY(py_fromc, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_fromc = PyArray_FROMANY(py_fromc, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_fromc) return NULL; - py_bool_fromp = PyArray_FROMANY(py_fromp, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_fromp = PyArray_FROMANY(py_fromp, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_fromp) return NULL; - py_bool_to = PyArray_FROMANY(py_to, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_to = PyArray_FROMANY(py_to, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_to) return NULL; - npy_intp nx = PyArray_DIM(py_bool_fromc, 
0); - npy_intp ny = PyArray_DIM(py_bool_fromc, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_bool_fromc, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_bool_fromc, 1); - if (PyArray_DIM(py_bool_fromp, 0) != nx || - PyArray_DIM(py_bool_fromp, 1) != ny) { + if (PyArray_DIM((PyArrayObject *) py_bool_fromp, 0) != nx || + PyArray_DIM((PyArrayObject *) py_bool_fromp, 1) != ny) { PyErr_SetString(PyExc_TypeError, "All three maps need to have identical dimensions."); return NULL; } - if (PyArray_DIM(py_bool_to, 0) != nx || PyArray_DIM(py_bool_to, 1) != ny) { + if (PyArray_DIM((PyArrayObject *) py_bool_to, 0) != nx || PyArray_DIM((PyArrayObject *) py_bool_to, 1) != ny) { PyErr_SetString(PyExc_TypeError, "All three maps need to have identical dimensions."); return NULL; } - npy_bool *fromc = (npy_bool*) PyArray_DATA(py_bool_fromc); - npy_bool *fromp = (npy_bool*) PyArray_DATA(py_bool_fromp); - npy_bool *to = (npy_bool*) PyArray_DATA(py_bool_to); + npy_bool *fromc = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_fromc); + npy_bool *fromp = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_fromp); + npy_bool *to = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_to); npy_intp dims[2] = { nx, ny }; PyObject *py_dist = PyArray_ZEROS(2, dims, NPY_DOUBLE, 0); if (!py_dist) return NULL; - npy_double *dist = (npy_double *) PyArray_DATA(py_dist); + npy_double *dist = (npy_double *) PyArray_DATA((PyArrayObject *) py_dist); /* * Make sure there is something to find @@ -502,27 +503,27 @@ py_distance_map(PyObject *self, PyObject *args) PyObject *py_bool_map_xy = NULL; py_bool_map_xy = PyArray_FROMANY(py_map_xy, NPY_BOOL, 2, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_map_xy) return NULL; - npy_intp nx = PyArray_DIM(py_bool_map_xy, 0); - npy_intp ny = PyArray_DIM(py_bool_map_xy, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_bool_map_xy, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_bool_map_xy, 1); - npy_bool *map_xy = (npy_bool*) PyArray_DATA(py_bool_map_xy); + npy_bool *map_xy = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_map_xy); /* This stores the distance to the closest point on the contour */ npy_intp dims[2] = { nx, ny }; PyObject *py_dist_xy = PyArray_ZEROS(2, dims, NPY_DOUBLE, 0); if (!py_dist_xy) return NULL; - npy_double *dist_xy = (npy_double *) PyArray_DATA(py_dist_xy); + npy_double *dist_xy = (npy_double *) PyArray_DATA((PyArrayObject *) py_dist_xy); /* This stores the index of the closest point */ PyObject *py_next_xy = PyArray_ZEROS(2, dims, NPY_INT, 0); if (!py_next_xy) return NULL; - npy_int *next_xy = (npy_int *) PyArray_DATA(py_next_xy); + npy_int *next_xy = (npy_int *) PyArray_DATA((PyArrayObject *) py_next_xy); /* * Fill map with maximum distance @@ -567,25 +568,25 @@ py_correlation_function(PyObject *self, PyObject *args) PyObject *py_double_map1 = NULL, *py_double_map2 = NULL; py_double_map1 = PyArray_FROMANY(py_map1, NPY_DOUBLE, 2, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_double_map1) return NULL; py_double_map2 = PyArray_FROMANY(py_map2, NPY_DOUBLE, 2, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_double_map2) return NULL; - npy_intp nx = PyArray_DIM(py_double_map1, 0); - npy_intp ny = PyArray_DIM(py_double_map1, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_double_map1, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_double_map1, 1); - if (PyArray_DIM(py_double_map2, 0) != nx || - PyArray_DIM(py_double_map2, 1) != ny) { + if (PyArray_DIM((PyArrayObject *) py_double_map2, 0) != nx || + 
PyArray_DIM((PyArrayObject *) py_double_map2, 1) != ny) { PyErr_SetString(PyExc_TypeError, "Both maps need to have the identical dimensions."); } - npy_double *map1 = (npy_double*) PyArray_DATA(py_double_map1); - npy_double *map2 = (npy_double*) PyArray_DATA(py_double_map2); + npy_double *map1 = (npy_double*) PyArray_DATA((PyArrayObject *) py_double_map1); + npy_double *map2 = (npy_double*) PyArray_DATA((PyArrayObject *) py_double_map2); /* * Correlation function @@ -594,7 +595,7 @@ py_correlation_function(PyObject *self, PyObject *args) PyObject *py_c = PyArray_ZEROS(1, dims, NPY_DOUBLE, 0); if (!py_c) return NULL; - npy_double *c = (npy_double*) PyArray_DATA(py_c); + npy_double *c = (npy_double*) PyArray_DATA((PyArrayObject *) py_c); /* * Number of points found at a certain distance @@ -602,7 +603,7 @@ py_correlation_function(PyObject *self, PyObject *args) PyObject *py_n = PyArray_ZEROS(1, dims, NPY_INT, 0); if (!py_n) return NULL; - npy_int *n = (npy_int*) PyArray_DATA(py_n); + npy_int *n = (npy_int*) PyArray_DATA((PyArrayObject *) py_n); /* * Fill with zeros @@ -659,17 +660,17 @@ py_correlation_function(PyObject *self, PyObject *args) PyObject *py_r = PyArray_ZEROS(1, dims, NPY_DOUBLE, 0); if (!py_r) return NULL; - npy_double *r = (npy_double*) PyArray_DATA(py_r); + npy_double *r = (npy_double*) PyArray_DATA((PyArrayObject *) py_r); /* Correlation function */ PyObject *py_cc = PyArray_ZEROS(1, dims, NPY_DOUBLE, 0); if (!py_cc) return NULL; - npy_double *cc = (npy_double*) PyArray_DATA(py_cc); + npy_double *cc = (npy_double*) PyArray_DATA((PyArrayObject *) py_cc); /* Integrated correlation function */ PyObject *py_Icc = PyArray_ZEROS(1, dims, NPY_DOUBLE, 0); if (!py_Icc) return NULL; - npy_double *Icc = (npy_double*) PyArray_DATA(py_Icc); + npy_double *Icc = (npy_double*) PyArray_DATA((PyArrayObject *) py_Icc); /* * Normalize and integrate @@ -778,14 +779,14 @@ py_perimeter_length(PyObject *self, PyObject *args) PyObject *py_bool_map = NULL; - py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_C_CONTIGUOUS); + py_bool_map = PyArray_FROMANY(py_map, NPY_BOOL, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_bool_map) return NULL; - npy_intp nx = PyArray_DIM(py_bool_map, 0); - npy_intp ny = PyArray_DIM(py_bool_map, 1); + npy_intp nx = PyArray_DIM((PyArrayObject *) py_bool_map, 0); + npy_intp ny = PyArray_DIM((PyArrayObject *) py_bool_map, 1); - npy_bool *map = (npy_bool*) PyArray_DATA(py_bool_map); + npy_bool *map = (npy_bool*) PyArray_DATA((PyArrayObject *) py_bool_map); double length = 0.0; diff --git a/c/matscipymodule.c b/c/matscipymodule.c index 4a31b593e..b4ce258ad 100644 --- a/c/matscipymodule.c +++ b/c/matscipymodule.c @@ -21,7 +21,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API -#define NPY_NO_DEPRECATED_API NPY_1_5_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include #include diff --git a/c/neighbours.c b/c/neighbours.c index c6fd4d4ae..9e57998b7 100644 --- a/c/neighbours.c +++ b/c/neighbours.c @@ -22,7 +22,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API #define NO_IMPORT_ARRAY -#define NPY_NO_DEPRECATED_API NPY_1_5_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include #include @@ -116,20 +116,20 @@ py_neighbour_list(PyObject *self, PyObject *args) /* Make sure our arrays are contiguous */ py_cell_origin = PyArray_FROMANY(py_cell_origin, NPY_DOUBLE, 1, 1, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_cell_origin) return NULL; py_cell = PyArray_FROMANY(py_cell, NPY_DOUBLE, 2, 2, - NPY_C_CONTIGUOUS); + 
NPY_ARRAY_C_CONTIGUOUS); if (!py_cell) return NULL; py_inv_cell = PyArray_FROMANY(py_inv_cell, NPY_DOUBLE, 2, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_inv_cell) return NULL; - py_pbc = PyArray_FROMANY(py_pbc, NPY_BOOL, 1, 1, NPY_C_CONTIGUOUS); + py_pbc = PyArray_FROMANY(py_pbc, NPY_BOOL, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_pbc) return NULL; - py_r = PyArray_FROMANY(py_r, NPY_DOUBLE, 2, 2, NPY_C_CONTIGUOUS); + py_r = PyArray_FROMANY(py_r, NPY_DOUBLE, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_r) return NULL; if (py_types) { - py_types = PyArray_FROMANY(py_types, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_types = PyArray_FROMANY(py_types, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_types) return NULL; } @@ -149,7 +149,7 @@ py_neighbour_list(PyObject *self, PyObject *args) /* This must be an array of cutoffs */ py_cutoffs = PyArray_FROMANY(py_cutoffs, NPY_DOUBLE, 1, 2, - NPY_C_CONTIGUOUS); + NPY_ARRAY_C_CONTIGUOUS); if (!py_cutoffs) return NULL; ncutoffdims = PyArray_NDIM((PyArrayObject *) py_cutoffs); ncutoffs = PyArray_DIM((PyArrayObject *) py_cutoffs, 0); @@ -688,7 +688,7 @@ py_first_neighbours(PyObject *self, PyObject *args) return NULL; /* Make sure our arrays are contiguous */ - py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_i) return NULL; /* Neighbour list size */ @@ -699,7 +699,7 @@ py_first_neighbours(PyObject *self, PyObject *args) PyObject *py_seed = PyArray_ZEROS(1, &n1, NPY_INT, 0); /* Construct seed array */ - first_neighbours(n, nn, PyArray_DATA(py_i), PyArray_DATA(py_seed)); + first_neighbours(n, nn, PyArray_DATA((PyArrayObject *) py_i), PyArray_DATA((PyArrayObject *) py_seed)); return py_seed; } @@ -722,7 +722,7 @@ py_triplet_list(PyObject *self, PyObject *args) npy_int *fi = NULL, *ij_t = NULL, *ik_t = NULL, *jk_t = NULL; - py_fi = PyArray_FROMANY(py_fi, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_fi = PyArray_FROMANY(py_fi, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); fi = PyArray_DATA((PyArrayObject *) py_fi); if (!fi) return NULL; @@ -736,7 +736,7 @@ py_triplet_list(PyObject *self, PyObject *args) return NULL; } py_absdist = PyArray_FROMANY(py_absdist, NPY_DOUBLE, - 1, 1, NPY_C_CONTIGUOUS); + 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_absdist) { PyErr_SetString(PyExc_TypeError, "Distances must be an " "array of floats."); @@ -756,7 +756,7 @@ py_triplet_list(PyObject *self, PyObject *args) } /* guess initial triplet list size */ - npy_intp dim = (int) PyArray_SIZE(py_fi); + npy_intp dim = (int) PyArray_SIZE((PyArrayObject *) py_fi); dim *= 2; /* initialize triplet lists */ @@ -765,7 +765,7 @@ py_triplet_list(PyObject *self, PyObject *args) PyObject *py_ik_t = PyArray_ZEROS(1, &dim, NPY_INT, 0); ik_t = PyArray_DATA((PyArrayObject *) py_ik_t); - int init_length = (int) PyArray_SIZE(py_fi); + int init_length = (int) PyArray_SIZE((PyArrayObject *) py_fi); /* compute the triplet list */ int index_trip = 0; @@ -773,7 +773,7 @@ py_triplet_list(PyObject *self, PyObject *args) for (int ij= fi[r]; ij < fi[r+1]; ij++) { for (int ik = fi[r]; ik < fi[r+1]; ik++) { /* resize array if necessary */ - int length_trip = (int) PyArray_SIZE(py_ij_t); + int length_trip = (int) PyArray_SIZE((PyArrayObject *) py_ij_t); if (index_trip >= length_trip) { length_trip *= 2; if (py_ij_t && !(ij_t = resize_array(py_ij_t, length_trip))) @@ -798,7 +798,7 @@ py_triplet_list(PyObject *self, PyObject *args) if (py_ij_t && !(ij_t = resize_array(py_ij_t, index_trip))) goto fail; if (py_ik_t && !(ik_t = 
resize_array(py_ik_t, index_trip))) goto fail; - npy_intp d1 = (int) PyArray_SIZE(py_ij_t); + npy_intp d1 = (int) PyArray_SIZE((PyArrayObject *) py_ij_t); PyObject *py_jk_t = PyArray_ZEROS(1, &d1, NPY_INT, 0); jk_t = PyArray_DATA((PyArrayObject *) py_jk_t); index_trip++; @@ -840,11 +840,11 @@ py_get_jump_indicies(PyObject *self, PyObject *args) return NULL; /* Make sure our arrays are contiguous */ - py_sorted = PyArray_FROMANY(py_sorted, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_sorted = PyArray_FROMANY(py_sorted, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_sorted) return NULL; /* sorted imput array size */ - int nn = (int) PyArray_SIZE(py_sorted); + int nn = (int) PyArray_SIZE((PyArrayObject *) py_sorted); /* calculate number of jumps */ npy_int *sorted = PyArray_DATA((PyArrayObject *) py_sorted); @@ -861,7 +861,7 @@ py_get_jump_indicies(PyObject *self, PyObject *args) PyObject *py_seed = PyArray_ZEROS(1, &n1, NPY_INT, 0); /* Construct seed array */ - first_neighbours(n, nn, sorted, PyArray_DATA(py_seed)); + first_neighbours(n, nn, sorted, PyArray_DATA((PyArrayObject *) py_seed)); return py_seed; } diff --git a/c/ring_statistics.cpp b/c/ring_statistics.cpp index e808501fc..d75d1051f 100644 --- a/c/ring_statistics.cpp +++ b/c/ring_statistics.cpp @@ -22,7 +22,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API #define NO_IMPORT_ARRAY -#define NPY_NO_DEPRECATED_API NPY_1_5_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include #include @@ -122,9 +122,9 @@ py_distances_on_graph(PyObject *self, PyObject *args) return NULL; /* Make sure our arrays are contiguous */ - py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_i) return NULL; - py_j = PyArray_FROMANY(py_j, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_j = PyArray_FROMANY(py_j, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_j) return NULL; /* Check array shapes. */ @@ -135,7 +135,7 @@ py_distances_on_graph(PyObject *self, PyObject *args) } /* Get total number of atoms */ - npy_int *i = (npy_int *) PyArray_DATA(py_i); + npy_int *i = (npy_int *) PyArray_DATA((PyArrayObject *) py_i); int nat = *std::max_element(i, i+nneigh)+1; /* Construct seed array */ @@ -147,8 +147,8 @@ py_distances_on_graph(PyObject *self, PyObject *args) dims[1] = nat; PyObject *py_dist = PyArray_ZEROS(2, dims, NPY_INT, 0); - if (!distances_on_graph(nat, seed, (npy_int *) PyArray_DATA(py_j), - (npy_int *) PyArray_DATA(py_dist), NULL)) { + if (!distances_on_graph(nat, seed, (npy_int *) PyArray_DATA((PyArrayObject *) py_j), + (npy_int *) PyArray_DATA((PyArrayObject *) py_dist), NULL)) { Py_DECREF(py_dist); return NULL; } @@ -407,13 +407,13 @@ py_find_sp_rings(PyObject *self, PyObject *args) return NULL; /* Make sure our arrays are contiguous */ - py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_i = PyArray_FROMANY(py_i, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_i) return NULL; - py_j = PyArray_FROMANY(py_j, NPY_INT, 1, 1, NPY_C_CONTIGUOUS); + py_j = PyArray_FROMANY(py_j, NPY_INT, 1, 1, NPY_ARRAY_C_CONTIGUOUS); if (!py_j) return NULL; - py_r = PyArray_FROMANY(py_r, NPY_DOUBLE, 2, 2, NPY_C_CONTIGUOUS); + py_r = PyArray_FROMANY(py_r, NPY_DOUBLE, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_r) return NULL; - py_dist = PyArray_FROMANY(py_dist, NPY_INT, 2, 2, NPY_C_CONTIGUOUS); + py_dist = PyArray_FROMANY(py_dist, NPY_INT, 2, 2, NPY_ARRAY_C_CONTIGUOUS); if (!py_dist) return NULL; /* Check array shapes. 
*/ @@ -433,7 +433,7 @@ py_find_sp_rings(PyObject *self, PyObject *args) } /* Get total number of atoms */ - npy_int *i = (npy_int *) PyArray_DATA(py_i); + npy_int *i = (npy_int *) PyArray_DATA((PyArrayObject *) py_i); int nat = *std::max_element(i, i+nneigh)+1; /* Check shape of distance map */ @@ -457,9 +457,9 @@ py_find_sp_rings(PyObject *self, PyObject *args) first_neighbours(nat, nneigh, i, seed.data()); std::vector ringstat; - if (!find_sp_ring_vertices(nat, seed, nneigh, (int *) PyArray_DATA(py_j), - (npy_double *) PyArray_DATA(py_r), - (npy_int *) PyArray_DATA(py_dist), + if (!find_sp_ring_vertices(nat, seed, nneigh, (int *) PyArray_DATA((PyArrayObject *) py_j), + (npy_double *) PyArray_DATA((PyArrayObject *) py_r), + (npy_int *) PyArray_DATA((PyArrayObject *) py_dist), maxlength, ringstat)) { return NULL; } @@ -467,7 +467,7 @@ py_find_sp_rings(PyObject *self, PyObject *args) npy_intp ringstat_size = ringstat.size(); PyObject *py_ringstat = PyArray_ZEROS(1, &ringstat_size, NPY_INT, 0); std::copy(ringstat.begin(), ringstat.end(), - (npy_int *) PyArray_DATA(py_ringstat)); + (npy_int *) PyArray_DATA((PyArrayObject *) py_ringstat)); return py_ringstat; } diff --git a/c/tools.c b/c/tools.c index 8987a5562..049515c52 100644 --- a/c/tools.c +++ b/c/tools.c @@ -22,6 +22,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL MATSCIPY_ARRAY_API #define NO_IMPORT_ARRAY +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION #include #include "tools.h" diff --git a/matscipy/calculators/calculator.py b/matscipy/calculators/calculator.py index c9547c41c..c4b5756c1 100644 --- a/matscipy/calculators/calculator.py +++ b/matscipy/calculators/calculator.py @@ -18,19 +18,20 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # + +import warnings + import numpy as np from scipy.sparse.linalg import cg from ase.calculators.calculator import Calculator -from numpy import deprecate from ..elasticity import ( Voigt_6_to_full_3x3_stress, nonaffine_elastic_contribution, ) -from ..numerical import numerical_nonaffine_forces - +from ..compat import compat_cg_parameters from ..numpy_tricks import mabincount @@ -69,12 +70,12 @@ def calculate(self, atoms, properties, system_changes): 'nonaffine_forces': self.get_nonaffine_forces, 'born_constants': self.get_born_elastic_constants, 'stress_elastic_contribution': - self.get_stress_contribution_to_elastic_constants, + self.get_stress_contribution_to_elastic_constants, 'birch_coefficients': self.get_birch_coefficients, 'nonaffine_elastic_contribution': - self.get_non_affine_contribution_to_elastic_constants, + self.get_non_affine_contribution_to_elastic_constants, 'elastic_constants': - self.get_elastic_constants + self.get_elastic_constants } for prop in filter(lambda p: p in properties, properties_map): @@ -186,10 +187,10 @@ def get_stress_contribution_to_elastic_constants(self, atoms): stress_contribution = 0.5 * sum( np.einsum(einsum, stress_ab, delta_ab) for einsum in ( - 'am,bn', - 'an,bm', - 'bm,an', - 'bn,am', + 'am,bn', + 'an,bm', + 'bm,an', + 'bn,am', ) ) @@ -243,7 +244,7 @@ def get_elastic_constants(self, atoms, cg_parameters={ "x0": None, - "tol": 1e-5, + "rtol": 1e-5, "maxiter": None, "M": None, "callback": None, @@ -263,8 +264,8 @@ def get_elastic_constants(self, x0: {array, matrix} Starting guess for the solution. - tol/atol: float, optional - Tolerances for convergence, norm(residual) <= max(tol*norm(b), atol). 
+ rtol/atol: float, optional + Tolerances for convergence, norm(residual) <= max(rtol*norm(b), atol). maxiter: int Maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved. @@ -290,8 +291,10 @@ def get_elastic_constants(self, return C - @deprecate(new_name="elasticity.nonaffine_elastic_contribution") - def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=None, eigenvectors=None, pc_parameters=None, cg_parameters={"x0": None, "tol": 1e-5, "maxiter": None, "M": None, "callback": None, "atol": 1e-5}): + def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=None, eigenvectors=None, + pc_parameters=None, + cg_parameters={"x0": None, "rtol": 1e-5, "maxiter": None, + "M": None, "callback": None, "atol": 1e-5}): """ Compute the correction of non-affine displacements to the elasticity tensor. The computation of the occuring inverse of the Hessian matrix is bypassed by using a cg solver. @@ -316,8 +319,9 @@ def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=No x0: {array, matrix} Starting guess for the solution. - tol/atol: float, optional - Tolerances for convergence, norm(residual) <= max(tol*norm(b), atol). + rtol/atol: float, optional + Tolerances for convergence, norm(residual) <= max( + rtol*norm(b), atol). maxiter: int Maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved. @@ -359,6 +363,12 @@ def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=No Dictionary containing additional expert options to SuperLU. """ + warnings.warn( + "This function is deprecated and will be removed in the future. Use 'elasticity.nonaffine_elastic_contribution' instead.", + DeprecationWarning) + + cg_parameters = compat_cg_parameters(cg_parameters) + nat = len(atoms) calc = self @@ -366,10 +376,10 @@ def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=No if (eigenvalues is not None) and (eigenvectors is not None): naforces_icab = calc.get_nonaffine_forces(atoms) - G_incc = (eigenvectors.T).reshape(-1, 3*nat, 1, 1) * naforces_icab.reshape(1, 3*nat, 3, 3) - G_incc = (G_incc.T/np.sqrt(eigenvalues)).T - G_icc = np.sum(G_incc, axis=1) - C_abab = np.sum(G_icc.reshape(-1,3,3,1,1) * G_icc.reshape(-1,1,1,3,3), axis=0) + G_incc = (eigenvectors.T).reshape(-1, 3 * nat, 1, 1) * naforces_icab.reshape(1, 3 * nat, 3, 3) + G_incc = (G_incc.T / np.sqrt(eigenvalues)).T + G_icc = np.sum(G_incc, axis=1) + C_abab = np.sum(G_icc.reshape(-1, 3, 3, 1, 1) * G_icc.reshape(-1, 1, 1, 3, 3), axis=0) else: H_nn = calc.get_hessian(atoms) @@ -384,33 +394,19 @@ def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=No operator_Hinv = LinearOperator(H_nn.shape, approx_Hinv.solve) cg_parameters["M"] = operator_Hinv - D_iab = np.zeros((3*nat, 3, 3)) + D_iab = np.zeros((3 * nat, 3, 3)) for i in range(3): for j in range(3): x, info = cg(H_nn, naforces_icab[:, :, i, j].flatten(), **cg_parameters) if info != 0: print("info: ", info) - raise RuntimeError(" info > 0: CG tolerance not achieved, info < 0: Exceeded number of iterations.") - D_iab[:,i,j] = x + raise RuntimeError( + " info > 0: CG tolerance not achieved, info < 0: Exceeded number of iterations.") + D_iab[:, i, j] = x - C_abab = np.sum(naforces_icab.reshape(3*nat, 3, 3, 1, 1) * D_iab.reshape(3*nat, 1, 1, 3, 3), axis=0) + C_abab = np.sum(naforces_icab.reshape(3 * nat, 3, 3, 1, 1) * D_iab.reshape(3 * nat, 1, 1, 3, 
3), axis=0) # Symmetrize C_abab = (C_abab + C_abab.swapaxes(0, 1) + C_abab.swapaxes(2, 3) + C_abab.swapaxes(0, 1).swapaxes(2, 3)) / 4 - return -C_abab/atoms.get_volume() - - @deprecate(new_name='numerical.numerical_nonaffine_forces') - def get_numerical_non_affine_forces(self, atoms, d=1e-6): - """ - - Calculate numerical non-affine forces using central finite differences. - This is done by deforming the box, rescaling atoms and measure the force. - - Parameters - ---------- - atoms: ase.Atoms - Atomic configuration in a local or global minima. - - """ - return numerical_nonaffine_forces(atoms, d=d) + return -C_abab / atoms.get_volume() diff --git a/matscipy/compat.py b/matscipy/compat.py new file mode 100644 index 000000000..de5eb023b --- /dev/null +++ b/matscipy/compat.py @@ -0,0 +1,13 @@ +import scipy +from packaging.version import Version + + +def compat_cg_parameters(cg_parameters): + if Version(scipy.__version__) >= Version('1.12.0'): + return cg_parameters + else: + cg_parameters = cg_parameters.copy() + if 'rtol' in cg_parameters: + cg_parameters['tol'] = cg_parameters['rtol'] + del cg_parameters['rtol'] + return cg_parameters diff --git a/matscipy/elasticity.py b/matscipy/elasticity.py index 68b9cd586..f6cd670a1 100644 --- a/matscipy/elasticity.py +++ b/matscipy/elasticity.py @@ -37,6 +37,8 @@ import ase.units as units from ase.atoms import Atoms +from .compat import compat_cg_parameters + ### # The indices of the full stiffness matrix of (orthorhombic) interest @@ -1335,6 +1337,8 @@ def _sym(C_abab): symmetry_group = [(0, 1, 2, 3), (1, 0, 2, 3), (0, 1, 3, 2), (1, 0, 3, 2)] return 0.25 * np.add.reduce([C_abab.transpose(s) for s in symmetry_group]) + cg_parameters = compat_cg_parameters(cg_parameters) + nat = len(atoms) naforces_icab = atoms.calc.get_property('nonaffine_forces') diff --git a/matscipy/electrochemistry/poisson_nernst_planck_solver.py b/matscipy/electrochemistry/poisson_nernst_planck_solver.py index 78f88d404..45a61098b 100644 --- a/matscipy/electrochemistry/poisson_nernst_planck_solver.py +++ b/matscipy/electrochemistry/poisson_nernst_planck_solver.py @@ -54,7 +54,7 @@ def B(x): # "lazy" Ansatz for approximating Jacobian -def jacobian(f, x0, dx=np.NaN): +def jacobian(f, x0, dx=np.nan): """Naive way to construct N x N Jacobin Fij from N-valued function f of N-valued vector x0. diff --git a/matscipy/hessian_finite_differences.py b/matscipy/hessian_finite_differences.py deleted file mode 100644 index d2e3bef20..000000000 --- a/matscipy/hessian_finite_differences.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg) -# 2018, 2020 Jan Griesser (U. Freiburg) -# 2014, 2020 James Kermode (Warwick U.) -# 2018 Jacek Golebiowski (Imperial College London) -# -# matscipy - Materials science with Python at the atomic-scale -# https://github.com/libAtoms/matscipy -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# - -"""Deprecated module.""" - -from numpy import deprecate -from .numerical import numerical_hessian - - -@deprecate(new_name="numerical.numerical_hessian") -def fd_hessian(atoms, dx=1e-5, indices=None): - """ - - Compute the hessian matrix from Jacobian of forces via central differences. - - Parameters - ---------- - atoms: ase.Atoms - Atomic configuration in a local or global minima. - - dx: float - Displacement increment - - indices: - Compute the hessian only for these atom IDs - - """ - return numerical_hessian(atoms, dx=dx, indices=indices) diff --git a/matscipy/io/opls.py b/matscipy/io/opls.py index 6aeeecc8c..890b444e3 100644 --- a/matscipy/io/opls.py +++ b/matscipy/io/opls.py @@ -23,7 +23,7 @@ import copy import re -from looseversion import LooseVersion +from packaging.version import Version import numpy as np import ase @@ -359,7 +359,7 @@ def write_lammps_atoms(prefix, atoms, units='metal'): fileobj.write('%d dihedral types\n' % (len(dtypes))) # cell - if LooseVersion(ase_version_str) > LooseVersion('3.11.0'): + if Version(ase_version_str) > Version('3.11.0'): p = ase.calculators.lammpsrun.Prism(atoms.get_cell()) else: p = ase.calculators.lammpsrun.prism(atoms.get_cell()) @@ -385,9 +385,9 @@ def write_lammps_atoms(prefix, atoms, units='metal'): molid = [1] * len(atoms) pos = ase.calculators.lammpsrun.convert(atoms.get_positions(), 'distance', 'ASE', units) - if LooseVersion(ase_version_str) > LooseVersion('3.17.0'): + if Version(ase_version_str) > Version('3.17.0'): positions_lammps_str = p.vector_to_lammps(pos).astype(str) - elif LooseVersion(ase_version_str) > LooseVersion('3.13.0'): + elif Version(ase_version_str) > Version('3.13.0'): positions_lammps_str = p.positions_to_lammps_strs(pos) else: positions_lammps_str = map(p.pos_to_lammps_str, pos) diff --git a/matscipy/meson.build b/matscipy/meson.build index db67658a7..96478f3ad 100644 --- a/matscipy/meson.build +++ b/matscipy/meson.build @@ -14,13 +14,13 @@ python_sources = [ version_file, 'angle_distribution.py', 'atomic_strain.py', + 'compat.py', 'deformation.py', 'dislocation.py', 'drift.py', 'elasticity.py', 'ffi.py', 'gamma_surface.py', - 'hessian_finite_differences.py', 'hydrogenate.py', 'logger.py', 'molecules.py', diff --git a/meson.build b/meson.build index 97371eefb..ad427d5d0 100644 --- a/meson.build +++ b/meson.build @@ -2,6 +2,7 @@ project( 'matscipy', # Project name 'c', 'cpp', # Project type. We need a C and C++ compiler. 
+ default_options : ['cpp_std=c++11'], version: run_command('python3', 'discover_version.py', check: true).stdout().strip(), # Project version ) diff --git a/pyproject.toml b/pyproject.toml index ace2031bb..4381a5a78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,11 @@ [build-system] -requires = ["meson>=1.0.0", "meson-python>=0.13.0", "ninja", "oldest-supported-numpy"] +requires = [ + "meson>=1.0.0", + "meson-python>=0.13.0", + "ninja", + "oldest-supported-numpy; python_version=='3.8'", + "numpy>=2.0.0; python_version>='3.9'" +] build-backend = "mesonpy" [project] @@ -15,13 +21,13 @@ classifiers = [ "Development Status :: 5 - Production/Stable", "Programming Language :: Python" ] -requires-python = ">=3.7.0" +requires-python = ">=3.8.0" dynamic = ["version"] dependencies = [ "numpy>=1.16.0", "scipy>=1.2.3", - "ase>=3.16.0", - "looseversion" + "ase>=3.23.0", + "packaging" ] [project.optional-dependencies] diff --git a/tests/test_committee.py b/tests/test_committee.py index 5c681e7de..1b07657e7 100644 --- a/tests/test_committee.py +++ b/tests/test_committee.py @@ -211,9 +211,13 @@ def test_committeeuncertainty_calculate(committee_calibrated): test_data = ase.io.read(os.path.join(f'{os.path.dirname(__file__)}/committee_data/test_data.xyz'), ':') for atoms_i in test_data: calculator.calculate(atoms=atoms_i, properties=['energy', 'forces']) - for prop_j in ['energy', 'energy_uncertainty']: + for prop_j in ['energy', 'forces']: + # energy and forces are read into the results dictionary of a SinglePointCalculator + np.testing.assert_array_almost_equal(calculator.results[prop_j], atoms_i.calc.results[prop_j], decimal=6, + err_msg=f'Missmatch in property \'{prop_j}\'') + for prop_j in ['energy_uncertainty']: np.testing.assert_array_almost_equal(calculator.results[prop_j], atoms_i.info[prop_j], decimal=6, err_msg=f'Missmatch in property \'{prop_j}\'') - for prop_j in ['forces', 'forces_uncertainty']: + for prop_j in ['forces_uncertainty']: np.testing.assert_array_almost_equal(calculator.results[prop_j], atoms_i.arrays[prop_j], decimal=6, err_msg=f'Missmatch in property \'{prop_j}\'') diff --git a/tests/test_electrochemistry_cli.py b/tests/test_electrochemistry_cli.py index e78cbfaf8..c37905b7b 100644 --- a/tests/test_electrochemistry_cli.py +++ b/tests/test_electrochemistry_cli.py @@ -29,7 +29,7 @@ import tempfile import unittest -from looseversion import LooseVersion +from packaging.version import Version class ElectrochemistryCliTest(matscipytest.MatSciPyTestCase): @@ -116,7 +116,7 @@ def test_c2d_input_format_txt_output_format_xyz(self): == self.ref_xyz.get_initial_charges() ).all() ) self.assertTrue( ( xyz.cell == self.ref_xyz.cell ).all() ) - @unittest.skipUnless(LooseVersion(ase.__version__) > LooseVersion('3.19.0'), + @unittest.skipUnless(Version(ase.__version__) > Version('3.19.0'), """ LAMMPS data file won't work for ASE version up until 3.18.1, LAMMPS data file input broken in ASE 3.19.0, skipped""") def test_c2d_input_format_npz_output_format_lammps(self): diff --git a/tests/test_hessian_precon.py b/tests/test_hessian_precon.py index a5707d7c7..3f4c6d65d 100644 --- a/tests/test_hessian_precon.py +++ b/tests/test_hessian_precon.py @@ -13,7 +13,7 @@ from ase.build import bulk from ase.optimize.precon import PreconLBFGS from ase.optimize import ODE12r, LBFGS -from ase.neb import NEB, NEBOptimizer +from ase.mep.neb import NEB, NEBOptimizer from ase.geometry.geometry import get_distances
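
A few usage sketches for the migration patterns in this patch follow; they are illustrative only and not part of the diff. First, the new `matscipy/compat.py` shim exists because SciPy 1.12 renamed the `cg` convergence keyword from `tol` to `rtol`; the shim lets callers write the new spelling everywhere. A minimal sketch (the 2x2 system is just a placeholder):

```python
# Minimal sketch: pass cg keyword arguments through the new compat shim so that
# 'rtol' (SciPy >= 1.12) degrades gracefully to 'tol' on older SciPy releases.
import numpy as np
from scipy.sparse.linalg import cg
from matscipy.compat import compat_cg_parameters

A = np.array([[4.0, 1.0], [1.0, 3.0]])   # placeholder SPD matrix
b = np.array([1.0, 2.0])

cg_parameters = {"rtol": 1e-5, "atol": 1e-5, "maxiter": None}
x, info = cg(A, b, **compat_cg_parameters(cg_parameters))
assert info == 0   # 0 means the requested tolerance was reached
```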
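The `looseversion` dependency is replaced by `packaging`, which is why `pyproject.toml` now lists `packaging` and `io/opls.py` imports `packaging.version.Version`. The comparison idiom stays the same; a sketch using the same ASE version gate as `write_lammps_atoms`:

```python
# Sketch of the Version comparison that replaces looseversion.LooseVersion.
# opls.py uses this check to pick the Prism class over the legacy prism factory.
from packaging.version import Version
import ase

new_style = Version(ase.__version__) > Version('3.11.0')
print("use ase.calculators.lammpsrun.Prism:", new_style)
```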
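NumPy 2.0 drops `numpy.deprecate`, so the `@deprecate` decorators are removed and the remaining deprecated method in `calculators/calculator.py` now raises the warning itself. A sketch of that pattern with hypothetical names (`old_api`/`new_api` are not from the patch):

```python
# Hypothetical old_api/new_api pair illustrating the warnings-based replacement
# for the removed numpy.deprecate decorator.
import warnings

def new_api(x):
    return 2 * x

def old_api(x):
    warnings.warn(
        "old_api is deprecated and will be removed in the future; "
        "use new_api instead.",
        DeprecationWarning, stacklevel=2)
    return new_api(x)

print(old_api(21))   # still works, but emits a DeprecationWarning
```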
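The NEB imports in `tests/test_hessian_precon.py` move to `ase.mep.neb`, which is safe because `pyproject.toml` now requires `ase>=3.23.0`. If the tests ever needed to run against an older ASE again, a guarded import would be one option; this is an alternative sketch, not what the patch does:

```python
# Alternative (not used by the patch): fall back to the old import location on
# ASE releases that predate the ase.mep reorganisation.
try:
    from ase.mep.neb import NEB, NEBOptimizer   # ASE >= 3.23
except ImportError:
    from ase.neb import NEB, NEBOptimizer       # older ASE
```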
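Finally, the `np.NaN` alias used as the default in `jacobian()` of the Poisson-Nernst-Planck solver no longer exists in NumPy 2.0; the lowercase spelling works on both major versions:

```python
# np.nan is the only spelling NumPy 2.0 accepts; it is also valid on NumPy 1.x.
import numpy as np

dx = np.nan
assert np.isnan(dx)
```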