From d0198d122015651489d65a717362fad8cb2b7bf7 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 22 Oct 2023 16:11:32 +0200 Subject: [PATCH 1/7] Expose/test kepF and kepDE. --- CMakeLists.txt | 4 +- doc/changelog.rst | 9 +- heyoka/CMakeLists.txt | 2 + heyoka/_sympy_utils.py | 2 + heyoka/_test_celmec.py | 165 +++++++++++++++++ heyoka/_test_mp.py | 8 +- heyoka/_test_sympy.py | 325 ++++++++++++++++++++++++++++++++ heyoka/expose_expression.cpp | 191 +++++++++++++------ heyoka/setup_sympy.cpp | 10 +- heyoka/test.py | 347 +---------------------------------- 10 files changed, 657 insertions(+), 406 deletions(-) create mode 100644 heyoka/_test_celmec.py create mode 100644 heyoka/_test_sympy.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 1cfa4d8d..65447515 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,7 +11,7 @@ if(NOT CMAKE_BUILD_TYPE) FORCE) endif() -project(heyoka.py VERSION 3.0.0 LANGUAGES CXX C) +project(heyoka.py VERSION 3.1.0 LANGUAGES CXX C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/yacma") @@ -118,7 +118,7 @@ find_package(fmt REQUIRED CONFIG) message(STATUS "fmt version: ${fmt_VERSION}") # heyoka. -find_package(heyoka 3.0.0 REQUIRED CONFIG) +find_package(heyoka 3.1.0 REQUIRED CONFIG) # Python. diff --git a/doc/changelog.rst b/doc/changelog.rst index 72bc6bca..37d4cfb8 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -3,9 +3,16 @@ Changelog ========= -3.0.1 (unreleased) +3.1.0 (unreleased) ------------------ +Changes +~~~~~~~ + +- heyoka.py now requires version 3.1.0 of the + heyoka C++ library + (`#140 `__). + Fix ~~~ diff --git a/heyoka/CMakeLists.txt b/heyoka/CMakeLists.txt index 736c4b3b..0fc87252 100644 --- a/heyoka/CMakeLists.txt +++ b/heyoka/CMakeLists.txt @@ -25,6 +25,8 @@ set(HEYOKA_PY_PYTHON_FILES _test_batch_integrator.py _test_ensemble.py _test_memcache.py + _test_celmec.py + _test_sympy.py model/__init__.py ) diff --git a/heyoka/_sympy_utils.py b/heyoka/_sympy_utils.py index ec0d7c8c..6be58131 100644 --- a/heyoka/_sympy_utils.py +++ b/heyoka/_sympy_utils.py @@ -177,6 +177,8 @@ def mul_wrapper(*args): retval[_spy.Mul] = mul_wrapper retval[_spy.Function("heyoka_kepE")] = core.kepE + retval[_spy.Function("heyoka_kepF")] = core.kepF + retval[_spy.Function("heyoka_kepDE")] = core.kepDE retval[_spy.Function("heyoka_time")] = lambda: core.time return retval diff --git a/heyoka/_test_celmec.py b/heyoka/_test_celmec.py new file mode 100644 index 00000000..86573102 --- /dev/null +++ b/heyoka/_test_celmec.py @@ -0,0 +1,165 @@ +# Copyright 2020, 2021, 2022, 2023 Francesco Biscani (bluescarni@gmail.com), Dario Izzo (dario.izzo@gmail.com) +# +# This file is part of the heyoka.py library. +# +# This Source Code Form is subject to the terms of the Mozilla +# Public License v. 2.0. If a copy of the MPL was not distributed +# with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +import unittest as _ut + + +class kepE_test_case(_ut.TestCase): + def test_expr(self): + from . import kepE, diff, make_vars, sin, cos, core + from .core import _ppc_arch + import numpy as np + + x, y = make_vars("x", "y") + + # Try a few overloads. 
+ kepE(x, y) + kepE(0.1, y) + kepE(e=x, M=0.2) + + self.assertEqual( + diff(kepE(x, y), x), sin(kepE(x, y)) / (1.0 - x * cos(kepE(x, y))) + ) + self.assertEqual(diff(kepE(x, y), y), 1.0 / (1.0 - x * cos(kepE(x, y)))) + + if not _ppc_arch: + self.assertEqual( + diff(kepE(x, np.longdouble("1.1")), x), + sin(kepE(x, np.longdouble("1.1"))) + / (1.0 - x * cos(kepE(x, np.longdouble("1.1")))), + ) + self.assertEqual( + diff(kepE(np.longdouble("1.1"), y), y), + 1.0 / (1.0 - np.longdouble("1.1") * cos(kepE(np.longdouble("1.1"), y))), + ) + + kepE(np.longdouble(0.1), y) + kepE(x, np.longdouble(0.2)) + + with self.assertRaises(TypeError) as cm: + kepE(0.1, np.longdouble("1.1")) + self.assertTrue( + "At least one of the arguments of kepE() must be an expression" + in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + kepE(0.1, 0.2) + self.assertTrue( + "At least one of the arguments of kepE() must be an expression" + in str(cm.exception) + ) + + if not hasattr(core, "real128"): + return + + from .core import real128 + + self.assertEqual( + diff(kepE(x, real128("1.1")), x), + sin(kepE(x, real128("1.1"))) / (1.0 - x * cos(kepE(x, real128("1.1")))), + ) + self.assertEqual( + diff(kepE(real128("1.1"), y), y), + 1.0 / (1.0 - real128("1.1") * cos(kepE(real128("1.1"), y))), + ) + + +class kepF_test_case(_ut.TestCase): + def test_expr(self): + from . import kepF, make_vars, core + from .core import _ppc_arch + import numpy as np + + x, y, z = make_vars("x", "y", "z") + + # Try a few overloads. + kepF(x, y, z) + kepF(0.1, y, z) + kepF(h=0.1, k=0.2, lam=z) + + if not _ppc_arch: + kepF(x, y, np.longdouble("1.1")) + kepF(x, np.longdouble(".1"), np.longdouble("1.1")) + + with self.assertRaises(TypeError) as cm: + kepF(x, 0.1, np.longdouble("1.1")) + self.assertTrue( + "The numerical arguments of kepF() must be all of the same type" + in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + kepF(0.1, 0.2, 0.3) + self.assertTrue( + "At least one of the arguments of kepF() must be an expression" + in str(cm.exception) + ) + + if not hasattr(core, "real128"): + return + + from .core import real128 + + kepF(real128(0.1), y, z) + kepF(real128(0.1), real128(0.2), z) + + with self.assertRaises(TypeError) as cm: + kepF(x, 0.1, real128("1.1")) + self.assertTrue( + "The numerical arguments of kepF() must be all of the same type" + in str(cm.exception) + ) + + +class kepDE_test_case(_ut.TestCase): + def test_expr(self): + from . import kepDE, make_vars, core + from .core import _ppc_arch + import numpy as np + + x, y, z = make_vars("x", "y", "z") + + # Try a few overloads. 
+ kepDE(x, y, z) + kepDE(0.1, y, z) + kepDE(s0=0.1, c0=0.2, DM=z) + + if not _ppc_arch: + kepDE(x, y, np.longdouble("1.1")) + kepDE(x, np.longdouble(".1"), np.longdouble("1.1")) + + with self.assertRaises(TypeError) as cm: + kepDE(x, 0.1, np.longdouble("1.1")) + self.assertTrue( + "The numerical arguments of kepDE() must be all of the same type" + in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + kepDE(0.1, 0.2, 0.3) + self.assertTrue( + "At least one of the arguments of kepDE() must be an expression" + in str(cm.exception) + ) + + if not hasattr(core, "real128"): + return + + from .core import real128 + + kepDE(real128(0.1), y, z) + kepDE(real128(0.1), real128(0.2), z) + + with self.assertRaises(TypeError) as cm: + kepDE(x, 0.1, real128("1.1")) + self.assertTrue( + "The numerical arguments of kepDE() must be all of the same type" + in str(cm.exception) + ) diff --git a/heyoka/_test_mp.py b/heyoka/_test_mp.py index bd47585d..f95af169 100644 --- a/heyoka/_test_mp.py +++ b/heyoka/_test_mp.py @@ -368,7 +368,7 @@ def test_expression(self): if not hasattr(core, "real"): return - from . import expression as ex, real, kepE, atan2 + from . import expression as ex, real, kepE, kepF, kepDE, atan2 self.assertEqual( str(ex(real("1.1", 128))), "1.100000000000000000000000000000000000001" @@ -415,6 +415,12 @@ def test_expression(self): kepE(ex("x"), real("1.1", 128)) kepE(real("1.1", 128), ex("x")) + kepF(ex("x"), real("1.1", 128), ex("y")) + kepF(real("1.1", 128), ex("y"), ex("x")) + + kepDE(ex("x"), real("1.1", 128), ex("y")) + kepDE(real("1.1", 128), ex("y"), ex("x")) + atan2(ex("x"), real("1.1", 128)) atan2(real("1.1", 128), ex("x")) diff --git a/heyoka/_test_sympy.py b/heyoka/_test_sympy.py new file mode 100644 index 00000000..8f6115b3 --- /dev/null +++ b/heyoka/_test_sympy.py @@ -0,0 +1,325 @@ +# Copyright 2020, 2021, 2022, 2023 Francesco Biscani (bluescarni@gmail.com), Dario Izzo (dario.izzo@gmail.com) +# +# This file is part of the heyoka.py library. +# +# This Source Code Form is subject to the terms of the Mozilla +# Public License v. 2.0. If a copy of the MPL was not distributed +# with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +import unittest as _ut + + +class sympy_test_case(_ut.TestCase): + def test_basic(self): + try: + import sympy + except ImportError: + return + + from . import from_sympy, make_vars, sum as hsum + + with self.assertRaises(TypeError) as cm: + from_sympy(3.5) + self.assertTrue( + "The 'ex' parameter must be a sympy expression but it is of type" + in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + from_sympy(sympy.Symbol("x"), []) + self.assertTrue( + "The 's_dict' parameter must be a dict but it is of type" + in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + from_sympy(sympy.Symbol("x"), {3.5: 3.5}) + self.assertTrue( + "The keys in 's_dict' must all be sympy expressions" in str(cm.exception) + ) + + with self.assertRaises(TypeError) as cm: + from_sympy(sympy.Symbol("x"), {sympy.Symbol("x"): 3.5}) + self.assertTrue( + "The values in 's_dict' must all be heyoka expressions" in str(cm.exception) + ) + + # Test the s_dict functionality of from_sympy(). + x, y = sympy.symbols("x y", real=True) + hx, hy, hz = make_vars("x", "y", "z") + + self.assertEqual( + from_sympy((x - y) * (x + y), s_dict={x - y: hz}), hsum([hx, hy]) * hz + ) + + def test_number_conversion(self): + try: + import sympy + except ImportError: + return + + from . 
import to_sympy, from_sympy, expression, core + from .core import _ppc_arch + from sympy import Float, Rational, Integer + from mpmath import workprec + import numpy as np + + with self.assertRaises(ValueError) as cm: + from_sympy(Rational(3, 5)) + self.assertTrue( + "Cannot convert from sympy a rational number whose denominator is not a power of 2" + in str(cm.exception) + ) + + # From integer. + self.assertEqual(from_sympy(Integer(-42)), expression(-42.0)) + + # From rational. + self.assertEqual(from_sympy(Rational(42, -2)), expression(-21.0)) + + # Double precision. + with workprec(53): + self.assertEqual(to_sympy(expression(1.1)), Float(1.1)) + self.assertEqual(from_sympy(Float(1.1)), expression(1.1)) + + self.assertEqual( + to_sympy(expression((2**40 + 1) / (2**128))), + Rational(2**40 + 1, 2**128), + ) + self.assertEqual( + from_sympy(Rational(2**40 + 1, 2**128)), + expression((2**40 + 1) / (2**128)), + ) + + # Long double precision. + if not _ppc_arch: + with workprec(np.finfo(np.longdouble).nmant + 1): + self.assertEqual( + to_sympy(expression(np.longdouble("1.1"))), + Float("1.1", precision=np.finfo(np.longdouble).nmant + 1), + ) + + # NOTE: on platforms where long double is not wider than + # double (e.g., MSVC), conversion from sympy will produce a double + # and these tests will fail. + if np.finfo(np.longdouble).nmant > np.finfo(float).nmant: + self.assertEqual( + from_sympy(Float("1.1")), expression(np.longdouble("1.1")) + ) + + expo = np.finfo(np.longdouble).nmant - 10 + self.assertEqual( + to_sympy( + expression( + np.longdouble(2**expo + 1) / np.longdouble(2**128) + ) + ), + Rational(2**expo + 1, 2**128), + ) + self.assertEqual( + from_sympy(Rational(2**expo + 1, 2**128)), + expression( + np.longdouble(2**expo + 1) / np.longdouble(2**128) + ), + ) + + # Too high precision. + if not hasattr(core, "real"): + with self.assertRaises(ValueError) as cm: + from_sympy(Integer(2**500 + 1)) + self.assertTrue("the required precision" in str(cm.exception)) + + if not hasattr(core, "real128") or _ppc_arch: + return + + from .core import real128 + + # Quad precision. + with workprec(113): + self.assertEqual( + to_sympy(expression(real128("1.1"))), Float("1.1", precision=113) + ) + self.assertEqual(from_sympy(Float("1.1")), expression(real128("1.1"))) + + expo = 100 + self.assertEqual( + to_sympy(expression(real128(2**expo + 1) / real128(2**128))), + Rational(2**expo + 1, 2**128), + ) + self.assertEqual( + from_sympy(Rational(2**expo + 1, 2**128)), + expression(real128(2**expo + 1) / real128(2**128)), + ) + + def test_sympar_conversion(self): + try: + import sympy + except ImportError: + return + + from . 
import to_sympy, from_sympy, expression, par + from sympy import Symbol + + self.assertEqual(Symbol("x", real=True), to_sympy(expression("x"))) + self.assertEqual(Symbol("par[0]", real=True), to_sympy(par[0])) + self.assertEqual(Symbol("par[9]", real=True), to_sympy(par[9])) + self.assertEqual(Symbol("par[123]", real=True), to_sympy(par[123])) + self.assertEqual( + Symbol("par[-123]", real=True), to_sympy(expression("par[-123]")) + ) + self.assertEqual(Symbol("par[]", real=True), to_sympy(expression("par[]"))) + + self.assertEqual(from_sympy(Symbol("x")), expression("x")) + self.assertEqual(from_sympy(Symbol("par[0]")), par[0]) + self.assertEqual(from_sympy(Symbol("par[9]")), par[9]) + self.assertEqual(from_sympy(Symbol("par[123]")), par[123]) + self.assertEqual(from_sympy(Symbol("par[-123]")), expression("par[-123]")) + self.assertEqual(from_sympy(Symbol("par[]")), expression("par[]")) + + def test_func_conversion(self): + try: + import sympy + except ImportError: + return + + import sympy as spy + + from . import core, make_vars, from_sympy, to_sympy, pi, sum as hsum + + from .model import nbody + + x, y, z, a, b, c = spy.symbols("x y z a b c", real=True) + hx, hy, hz, ha, hb, hc = make_vars("x", "y", "z", "a", "b", "c") + + self.assertEqual(core.acos(hx), from_sympy(spy.acos(x))) + self.assertEqual(to_sympy(core.acos(hx)), spy.acos(x)) + + self.assertEqual(core.acosh(hx), from_sympy(spy.acosh(x))) + self.assertEqual(to_sympy(core.acosh(hx)), spy.acosh(x)) + + self.assertEqual(core.asin(hx), from_sympy(spy.asin(x))) + self.assertEqual(to_sympy(core.asin(hx)), spy.asin(x)) + + self.assertEqual(core.asinh(hx), from_sympy(spy.asinh(x))) + self.assertEqual(to_sympy(core.asinh(hx)), spy.asinh(x)) + + self.assertEqual(core.atan(hx), from_sympy(spy.atan(x))) + self.assertEqual(to_sympy(core.atan(hx)), spy.atan(x)) + + self.assertEqual(core.atan2(hy, hx), from_sympy(spy.atan2(y, x))) + self.assertEqual(to_sympy(core.atan2(hy, hx)), spy.atan2(y, x)) + + self.assertEqual(core.atanh(hx), from_sympy(spy.atanh(x))) + self.assertEqual(to_sympy(core.atanh(hx)), spy.atanh(x)) + + self.assertEqual(core.cos(hx), from_sympy(spy.cos(x))) + self.assertEqual(to_sympy(core.cos(hx)), spy.cos(x)) + + self.assertEqual(core.cosh(hx), from_sympy(spy.cosh(x))) + self.assertEqual(to_sympy(core.cosh(hx)), spy.cosh(x)) + + self.assertEqual(core.erf(hx), from_sympy(spy.erf(x))) + self.assertEqual(to_sympy(core.erf(hx)), spy.erf(x)) + + self.assertEqual(core.exp(hx), from_sympy(spy.exp(x))) + self.assertEqual(to_sympy(core.exp(hx)), spy.exp(x)) + + self.assertEqual(core.log(hx), from_sympy(spy.log(x))) + self.assertEqual(to_sympy(core.log(hx)), spy.log(x)) + + self.assertEqual(core.sin(hx), from_sympy(spy.sin(x))) + self.assertEqual(to_sympy(core.sin(hx)), spy.sin(x)) + + self.assertEqual(core.sinh(hx), from_sympy(spy.sinh(x))) + self.assertEqual(to_sympy(core.sinh(hx)), spy.sinh(x)) + + self.assertEqual(core.sqrt(hx), from_sympy(spy.sqrt(x))) + self.assertEqual(to_sympy(core.sqrt(hx)), spy.sqrt(x)) + + self.assertEqual(core.tan(hx), from_sympy(spy.tan(x))) + self.assertEqual(to_sympy(core.tan(hx)), spy.tan(x)) + + self.assertEqual(core.tanh(hx), from_sympy(spy.tanh(x))) + self.assertEqual(to_sympy(core.tanh(hx)), spy.tanh(x)) + + self.assertEqual(hx**3.5, from_sympy(x**3.5)) + self.assertEqual(to_sympy(hx**3.5), x**3.5) + + self.assertEqual(hsum([hx, hy, hz]), from_sympy(x + y + z)) + self.assertEqual(to_sympy(hx + hy + hz), x + y + z) + self.assertEqual(to_sympy(hsum([hx, hy, hz])), x + y + z) + 
self.assertEqual(to_sympy(hsum([hx])), x) + self.assertEqual(to_sympy(hsum([])), 0.0) + self.assertEqual( + hsum([ha, hb, hc, hx, hy, hz]), from_sympy(x + y + z + a + b + c) + ) + self.assertEqual(to_sympy(ha + hb + hc + hx + hy + hz), x + y + z + a + b + c) + self.assertEqual( + to_sympy(hsum([ha, hb, hc, hx, hy, hz])), x + y + z + a + b + c + ) + + self.assertEqual(hx * hy * hz, from_sympy(x * y * z)) + self.assertEqual(to_sympy(hx * hy * hz), x * y * z) + self.assertEqual( + (ha * hb) * (hc * hx) * (hy * hz), from_sympy(x * y * z * a * b * c) + ) + self.assertEqual(to_sympy(ha * hb * hc * hx * hy * hz), x * y * z * a * b * c) + + self.assertEqual(hsum([hx, -1.0 * hy, -1.0 * hz]), from_sympy(x - y - z)) + self.assertEqual(to_sympy(hx - hy - hz), x - y - z) + + # Run a test in the vector form as well. + self.assertEqual(to_sympy([hx - hy - hz, hx * hy * hz]), [x - y - z, x * y * z]) + + self.assertEqual(hx * hz**-1.0, from_sympy(x / z)) + self.assertEqual(to_sympy(hx / hz), x / z) + + self.assertEqual( + core.kepE(hx, hy), from_sympy(spy.Function("heyoka_kepE")(x, y)) + ) + self.assertEqual(to_sympy(core.kepE(hx, hy)), spy.Function("heyoka_kepE")(x, y)) + + self.assertEqual( + core.kepF(hx, hy, hz), from_sympy(spy.Function("heyoka_kepF")(x, y, z)) + ) + self.assertEqual( + to_sympy(core.kepF(hx, hy, hz)), spy.Function("heyoka_kepF")(x, y, z) + ) + + self.assertEqual( + core.kepDE(hx, hy, hz), from_sympy(spy.Function("heyoka_kepDE")(x, y, z)) + ) + self.assertEqual( + to_sympy(core.kepDE(hx, hy, hz)), spy.Function("heyoka_kepDE")(x, y, z) + ) + + self.assertEqual(-1.0 * hx, from_sympy(-x)) + self.assertEqual(to_sympy(-hx), -x) + + self.assertEqual(to_sympy(core.sigmoid(hx + hy)), 1.0 / (1.0 + spy.exp(-x - y))) + + self.assertEqual(core.time, from_sympy(spy.Function("heyoka_time")())) + self.assertEqual(to_sympy(core.time), spy.Function("heyoka_time")()) + + with self.assertRaises(TypeError) as cm: + from_sympy(abs(x)) + self.assertTrue("Unable to convert the sympy object" in str(cm.exception)) + + # Test caching behaviour. + foo = hx + hy + bar = foo / (foo * hz + 1.0) + bar_spy = to_sympy(bar) + self.assertEqual( + id(bar_spy.args[1]), id(bar_spy.args[0].args[0].args[1].args[1]) + ) + + # pi constant. + self.assertEqual(to_sympy(pi), spy.pi) + self.assertEqual(from_sympy(spy.pi), pi) + self.assertEqual(to_sympy(from_sympy(spy.pi)), spy.pi) + + # nbody helper. + [to_sympy(_[1]) for _ in nbody(2)] + [to_sympy(_[1]) for _ in nbody(4)] + [to_sympy(_[1]) for _ in nbody(10)] diff --git a/heyoka/expose_expression.cpp b/heyoka/expose_expression.cpp index aafd8e08..2978b561 100644 --- a/heyoka/expose_expression.cpp +++ b/heyoka/expose_expression.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,19 @@ namespace heyoka_py { +namespace detail +{ + +namespace +{ + +template +using uncvref_t = std::remove_cv_t>; + +} // namespace + +} // namespace detail + namespace py = pybind11; void expose_expression(py::module_ &m) @@ -263,9 +277,10 @@ void expose_expression(py::module_ &m) // Prod. m.def("prod", &hey::prod, "terms"_a); + // NOTE: need explicit casts for sqrt and exp due to the presence of overloads for number. 
m.def("sqrt", static_cast(&hey::sqrt)); + m.def("exp", static_cast(&hey::exp)); m.def("log", &hey::log); - m.def("exp", [](hey::expression e) { return hey::exp(std::move(e)); }); m.def("sin", &hey::sin); m.def("cos", &hey::cos); m.def("tan", &hey::tan); @@ -281,76 +296,134 @@ void expose_expression(py::module_ &m) m.def("sigmoid", &hey::sigmoid); m.def("erf", &hey::erf); - // kepE(). - m.def( - "kepE", [](hey::expression e, hey::expression M) { return hey::kepE(std::move(e), std::move(M)); }, "e"_a, - "M"_a); - m.def( - "kepE", [](double e, hey::expression M) { return hey::kepE(e, std::move(M)); }, "e"_a.noconvert(), "M"_a); - m.def( - "kepE", [](long double e, hey::expression M) { return hey::kepE(e, std::move(M)); }, "e"_a.noconvert(), "M"_a); + // NOTE: when exposing multivariate functions, we want to be able to pass + // in numerical arguments for convenience. Thus, we expose such functions taking + // in input a union of expression and supported numerical types. + using mvf_arg = std::variant; + // kepE(). m.def( - "kepE", [](hey::expression e, double M) { return hey::kepE(std::move(e), M); }, "e"_a, "M"_a.noconvert()); - m.def( - "kepE", [](hey::expression e, long double M) { return hey::kepE(std::move(e), M); }, "e"_a, "M"_a.noconvert()); -#if defined(HEYOKA_HAVE_REAL128) - m.def( - "kepE", [](hey::expression e, mppp::real128 M) { return hey::kepE(std::move(e), M); }, "e"_a, - "M"_a.noconvert()); -#endif -#if defined(HEYOKA_HAVE_REAL) - m.def( - "kepE", [](hey::expression e, mppp::real M) { return hey::kepE(std::move(e), std::move(M)); }, "e"_a, - "M"_a.noconvert()); -#endif + "kepE", + [](const mvf_arg &e, const mvf_arg &M) { + return std::visit( + [](const auto &a, const auto &b) -> hey::expression { + using tp1 = detail::uncvref_t; + using tp2 = detail::uncvref_t; + + if constexpr (!std::is_same_v && !std::is_same_v) { + py_throw(PyExc_TypeError, "At least one of the arguments of kepE() must be an expression"); + } else { + return hey::kepE(a, b); + } + }, + e, M); + }, + "e"_a, "M"_a); - // atan2(). + // kepF(). m.def( - "atan2", [](hey::expression y, hey::expression x) { return hey::atan2(std::move(y), std::move(x)); }, "y"_a, - "x"_a); + "kepF", + [](const mvf_arg &h, const mvf_arg &k, const mvf_arg &lam) { + return std::visit( + [](const auto &a, const auto &b, const auto &c) -> hey::expression { + using tp1 = detail::uncvref_t; + using tp2 = detail::uncvref_t; + using tp3 = detail::uncvref_t; + + constexpr auto tp1_num = static_cast(!std::is_same_v); + constexpr auto tp2_num = static_cast(!std::is_same_v); + constexpr auto tp3_num = static_cast(!std::is_same_v); + + constexpr auto n_num = tp1_num + tp2_num + tp3_num; + + if constexpr (n_num == 3) { + py_throw(PyExc_TypeError, "At least one of the arguments of kepF() must be an expression"); + } else if constexpr (n_num == 2) { + constexpr auto flag = tp1_num + (tp2_num << 1) + (tp3_num << 2); + + if constexpr (flag == 6 && std::is_same_v) { + return hey::kepF(a, b, c); + } else if constexpr (flag == 5 && std::is_same_v) { + return hey::kepF(a, b, c); + } else if constexpr (flag == 3 && std::is_same_v) { + return hey::kepF(a, b, c); + } else { + py_throw(PyExc_TypeError, "The numerical arguments of kepF() must be all of the same type"); + } + } else { + return hey::kepF(a, b, c); + } + }, + h, k, lam); + }, + "h"_a, "k"_a, "lam"_a); + // kepDE(). 
m.def( - "atan2", [](double y, hey::expression x) { return hey::atan2(y, std::move(x)); }, "y"_a.noconvert(), "x"_a); - m.def( - "atan2", [](long double y, hey::expression x) { return hey::atan2(y, std::move(x)); }, "y"_a.noconvert(), - "x"_a); -#if defined(HEYOKA_HAVE_REAL128) - m.def( - "atan2", [](mppp::real128 y, hey::expression x) { return hey::atan2(y, std::move(x)); }, "y"_a.noconvert(), - "x"_a); -#endif -#if defined(HEYOKA_HAVE_REAL) - m.def( - "atan2", [](mppp::real y, hey::expression x) { return hey::atan2(std::move(y), std::move(x)); }, - "y"_a.noconvert(), "x"_a); -#endif + "kepDE", + [](const mvf_arg &s0, const mvf_arg &c0, const mvf_arg &DM) { + return std::visit( + [](const auto &a, const auto &b, const auto &c) -> hey::expression { + using tp1 = detail::uncvref_t; + using tp2 = detail::uncvref_t; + using tp3 = detail::uncvref_t; + + constexpr auto tp1_num = static_cast(!std::is_same_v); + constexpr auto tp2_num = static_cast(!std::is_same_v); + constexpr auto tp3_num = static_cast(!std::is_same_v); + + constexpr auto n_num = tp1_num + tp2_num + tp3_num; + + if constexpr (n_num == 3) { + py_throw(PyExc_TypeError, "At least one of the arguments of kepDE() must be an expression"); + } else if constexpr (n_num == 2) { + constexpr auto flag = tp1_num + (tp2_num << 1) + (tp3_num << 2); + + if constexpr (flag == 6 && std::is_same_v) { + return hey::kepDE(a, b, c); + } else if constexpr (flag == 5 && std::is_same_v) { + return hey::kepDE(a, b, c); + } else if constexpr (flag == 3 && std::is_same_v) { + return hey::kepDE(a, b, c); + } else { + py_throw(PyExc_TypeError, + "The numerical arguments of kepDE() must be all of the same type"); + } + } else { + return hey::kepDE(a, b, c); + } + }, + s0, c0, DM); + }, + "s0"_a, "c0"_a, "DM"_a); + // atan2(). m.def( - "atan2", [](hey::expression y, double x) { return hey::atan2(std::move(y), x); }, "y"_a, "x"_a.noconvert()); - m.def( - "atan2", [](hey::expression y, long double x) { return hey::atan2(std::move(y), x); }, "y"_a, - "x"_a.noconvert()); -#if defined(HEYOKA_HAVE_REAL128) - m.def( - "atan2", [](hey::expression y, mppp::real128 x) { return hey::atan2(std::move(y), x); }, "y"_a, - "x"_a.noconvert()); -#endif -#if defined(HEYOKA_HAVE_REAL) - m.def( - "atan2", [](hey::expression y, mppp::real x) { return hey::atan2(std::move(y), std::move(x)); }, "y"_a, - "x"_a.noconvert()); -#endif + "atan2", + [](const mvf_arg &y, const mvf_arg &x) { + return std::visit( + [](const auto &a, const auto &b) -> hey::expression { + using tp1 = detail::uncvref_t; + using tp2 = detail::uncvref_t; + + if constexpr (!std::is_same_v && !std::is_same_v) { + py_throw(PyExc_TypeError, "At least one of the arguments of atan2() must be an expression"); + } else { + return hey::atan2(a, b); + } + }, + y, x); + }, + "y"_a, "x"_a); // Time. m.attr("time") = hey::time; diff --git a/heyoka/setup_sympy.cpp b/heyoka/setup_sympy.cpp index facbf850..ad515349 100644 --- a/heyoka/setup_sympy.cpp +++ b/heyoka/setup_sympy.cpp @@ -297,11 +297,17 @@ void setup_sympy(py::module &m) } }; - // kepE. - // NOTE: this will remain an unevaluated binary function. + // kepE, kepF, kepDE. + // NOTE: these will remain unevaluated functions. 
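+    // NOTE: the reverse conversion (from these unevaluated sympy functions back to
+    // kepE()/kepF()/kepDE()) is registered on the Python side in _sympy_utils.py.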
auto sympy_kepE = py::object(detail::spy->attr("Function")("heyoka_kepE")); detail::fmap[typeid(hy::detail::kepE_impl)] = sympy_kepE; + auto sympy_kepF = py::object(detail::spy->attr("Function")("heyoka_kepF")); + detail::fmap[typeid(hy::detail::kepF_impl)] = sympy_kepF; + + auto sympy_kepDE = py::object(detail::spy->attr("Function")("heyoka_kepDE")); + detail::fmap[typeid(hy::detail::kepDE_impl)] = sympy_kepDE; + // sigmoid. detail::fmap[typeid(hy::detail::sigmoid_impl)] = [](std::unordered_map &func_map, const hy::func &f) { diff --git a/heyoka/test.py b/heyoka/test.py index 20e378c0..8ad5e73f 100644 --- a/heyoka/test.py +++ b/heyoka/test.py @@ -1873,345 +1873,6 @@ def cb3(ta, mr, d_sgn): ) -class kepE_test_case(_ut.TestCase): - def test_expr(self): - from . import kepE, diff, make_vars, sin, cos, core - from .core import _ppc_arch - import numpy as np - - x, y = make_vars("x", "y") - self.assertEqual( - diff(kepE(x, y), x), sin(kepE(x, y)) / (1.0 - x * cos(kepE(x, y))) - ) - self.assertEqual(diff(kepE(x, y), y), 1.0 / (1.0 - x * cos(kepE(x, y)))) - - if not _ppc_arch: - self.assertEqual( - diff(kepE(x, np.longdouble("1.1")), x), - sin(kepE(x, np.longdouble("1.1"))) - / (1.0 - x * cos(kepE(x, np.longdouble("1.1")))), - ) - self.assertEqual( - diff(kepE(np.longdouble("1.1"), y), y), - 1.0 / (1.0 - np.longdouble("1.1") * cos(kepE(np.longdouble("1.1"), y))), - ) - - if not hasattr(core, "real128"): - return - - from .core import real128 - - self.assertEqual( - diff(kepE(x, real128("1.1")), x), - sin(kepE(x, real128("1.1"))) / (1.0 - x * cos(kepE(x, real128("1.1")))), - ) - self.assertEqual( - diff(kepE(real128("1.1"), y), y), - 1.0 / (1.0 - real128("1.1") * cos(kepE(real128("1.1"), y))), - ) - - -class sympy_test_case(_ut.TestCase): - def test_basic(self): - try: - import sympy - except ImportError: - return - - from . import from_sympy, make_vars, sum as hsum - - with self.assertRaises(TypeError) as cm: - from_sympy(3.5) - self.assertTrue( - "The 'ex' parameter must be a sympy expression but it is of type" - in str(cm.exception) - ) - - with self.assertRaises(TypeError) as cm: - from_sympy(sympy.Symbol("x"), []) - self.assertTrue( - "The 's_dict' parameter must be a dict but it is of type" - in str(cm.exception) - ) - - with self.assertRaises(TypeError) as cm: - from_sympy(sympy.Symbol("x"), {3.5: 3.5}) - self.assertTrue( - "The keys in 's_dict' must all be sympy expressions" in str(cm.exception) - ) - - with self.assertRaises(TypeError) as cm: - from_sympy(sympy.Symbol("x"), {sympy.Symbol("x"): 3.5}) - self.assertTrue( - "The values in 's_dict' must all be heyoka expressions" in str(cm.exception) - ) - - # Test the s_dict functionality of from_sympy(). - x, y = sympy.symbols("x y", real=True) - hx, hy, hz = make_vars("x", "y", "z") - - self.assertEqual( - from_sympy((x - y) * (x + y), s_dict={x - y: hz}), hsum([hx, hy]) * hz - ) - - def test_number_conversion(self): - try: - import sympy - except ImportError: - return - - from . import to_sympy, from_sympy, expression, core - from .core import _ppc_arch - from sympy import Float, Rational, Integer - from mpmath import workprec - import numpy as np - - with self.assertRaises(ValueError) as cm: - from_sympy(Rational(3, 5)) - self.assertTrue( - "Cannot convert from sympy a rational number whose denominator is not a power of 2" - in str(cm.exception) - ) - - # From integer. - self.assertEqual(from_sympy(Integer(-42)), expression(-42.0)) - - # From rational. 
- self.assertEqual(from_sympy(Rational(42, -2)), expression(-21.0)) - - # Double precision. - with workprec(53): - self.assertEqual(to_sympy(expression(1.1)), Float(1.1)) - self.assertEqual(from_sympy(Float(1.1)), expression(1.1)) - - self.assertEqual( - to_sympy(expression((2**40 + 1) / (2**128))), - Rational(2**40 + 1, 2**128), - ) - self.assertEqual( - from_sympy(Rational(2**40 + 1, 2**128)), - expression((2**40 + 1) / (2**128)), - ) - - # Long double precision. - if not _ppc_arch: - with workprec(np.finfo(np.longdouble).nmant + 1): - self.assertEqual( - to_sympy(expression(np.longdouble("1.1"))), - Float("1.1", precision=np.finfo(np.longdouble).nmant + 1), - ) - - # NOTE: on platforms where long double is not wider than - # double (e.g., MSVC), conversion from sympy will produce a double - # and these tests will fail. - if np.finfo(np.longdouble).nmant > np.finfo(float).nmant: - self.assertEqual( - from_sympy(Float("1.1")), expression(np.longdouble("1.1")) - ) - - expo = np.finfo(np.longdouble).nmant - 10 - self.assertEqual( - to_sympy( - expression( - np.longdouble(2**expo + 1) / np.longdouble(2**128) - ) - ), - Rational(2**expo + 1, 2**128), - ) - self.assertEqual( - from_sympy(Rational(2**expo + 1, 2**128)), - expression( - np.longdouble(2**expo + 1) / np.longdouble(2**128) - ), - ) - - # Too high precision. - if not hasattr(core, "real"): - with self.assertRaises(ValueError) as cm: - from_sympy(Integer(2**500 + 1)) - self.assertTrue("the required precision" in str(cm.exception)) - - if not hasattr(core, "real128") or _ppc_arch: - return - - from .core import real128 - - # Quad precision. - with workprec(113): - self.assertEqual( - to_sympy(expression(real128("1.1"))), Float("1.1", precision=113) - ) - self.assertEqual(from_sympy(Float("1.1")), expression(real128("1.1"))) - - expo = 100 - self.assertEqual( - to_sympy(expression(real128(2**expo + 1) / real128(2**128))), - Rational(2**expo + 1, 2**128), - ) - self.assertEqual( - from_sympy(Rational(2**expo + 1, 2**128)), - expression(real128(2**expo + 1) / real128(2**128)), - ) - - def test_sympar_conversion(self): - try: - import sympy - except ImportError: - return - - from . import to_sympy, from_sympy, expression, par - from sympy import Symbol - - self.assertEqual(Symbol("x", real=True), to_sympy(expression("x"))) - self.assertEqual(Symbol("par[0]", real=True), to_sympy(par[0])) - self.assertEqual(Symbol("par[9]", real=True), to_sympy(par[9])) - self.assertEqual(Symbol("par[123]", real=True), to_sympy(par[123])) - self.assertEqual( - Symbol("par[-123]", real=True), to_sympy(expression("par[-123]")) - ) - self.assertEqual(Symbol("par[]", real=True), to_sympy(expression("par[]"))) - - self.assertEqual(from_sympy(Symbol("x")), expression("x")) - self.assertEqual(from_sympy(Symbol("par[0]")), par[0]) - self.assertEqual(from_sympy(Symbol("par[9]")), par[9]) - self.assertEqual(from_sympy(Symbol("par[123]")), par[123]) - self.assertEqual(from_sympy(Symbol("par[-123]")), expression("par[-123]")) - self.assertEqual(from_sympy(Symbol("par[]")), expression("par[]")) - - def test_func_conversion(self): - try: - import sympy - except ImportError: - return - - import sympy as spy - - from . 
import core, make_vars, from_sympy, to_sympy, pi, sum as hsum - - from .model import nbody - - x, y, z, a, b, c = spy.symbols("x y z a b c", real=True) - hx, hy, hz, ha, hb, hc = make_vars("x", "y", "z", "a", "b", "c") - - self.assertEqual(core.acos(hx), from_sympy(spy.acos(x))) - self.assertEqual(to_sympy(core.acos(hx)), spy.acos(x)) - - self.assertEqual(core.acosh(hx), from_sympy(spy.acosh(x))) - self.assertEqual(to_sympy(core.acosh(hx)), spy.acosh(x)) - - self.assertEqual(core.asin(hx), from_sympy(spy.asin(x))) - self.assertEqual(to_sympy(core.asin(hx)), spy.asin(x)) - - self.assertEqual(core.asinh(hx), from_sympy(spy.asinh(x))) - self.assertEqual(to_sympy(core.asinh(hx)), spy.asinh(x)) - - self.assertEqual(core.atan(hx), from_sympy(spy.atan(x))) - self.assertEqual(to_sympy(core.atan(hx)), spy.atan(x)) - - self.assertEqual(core.atan2(hy, hx), from_sympy(spy.atan2(y, x))) - self.assertEqual(to_sympy(core.atan2(hy, hx)), spy.atan2(y, x)) - - self.assertEqual(core.atanh(hx), from_sympy(spy.atanh(x))) - self.assertEqual(to_sympy(core.atanh(hx)), spy.atanh(x)) - - self.assertEqual(core.cos(hx), from_sympy(spy.cos(x))) - self.assertEqual(to_sympy(core.cos(hx)), spy.cos(x)) - - self.assertEqual(core.cosh(hx), from_sympy(spy.cosh(x))) - self.assertEqual(to_sympy(core.cosh(hx)), spy.cosh(x)) - - self.assertEqual(core.erf(hx), from_sympy(spy.erf(x))) - self.assertEqual(to_sympy(core.erf(hx)), spy.erf(x)) - - self.assertEqual(core.exp(hx), from_sympy(spy.exp(x))) - self.assertEqual(to_sympy(core.exp(hx)), spy.exp(x)) - - self.assertEqual(core.log(hx), from_sympy(spy.log(x))) - self.assertEqual(to_sympy(core.log(hx)), spy.log(x)) - - self.assertEqual(core.sin(hx), from_sympy(spy.sin(x))) - self.assertEqual(to_sympy(core.sin(hx)), spy.sin(x)) - - self.assertEqual(core.sinh(hx), from_sympy(spy.sinh(x))) - self.assertEqual(to_sympy(core.sinh(hx)), spy.sinh(x)) - - self.assertEqual(core.sqrt(hx), from_sympy(spy.sqrt(x))) - self.assertEqual(to_sympy(core.sqrt(hx)), spy.sqrt(x)) - - self.assertEqual(core.tan(hx), from_sympy(spy.tan(x))) - self.assertEqual(to_sympy(core.tan(hx)), spy.tan(x)) - - self.assertEqual(core.tanh(hx), from_sympy(spy.tanh(x))) - self.assertEqual(to_sympy(core.tanh(hx)), spy.tanh(x)) - - self.assertEqual(hx**3.5, from_sympy(x**3.5)) - self.assertEqual(to_sympy(hx**3.5), x**3.5) - - self.assertEqual(hsum([hx, hy, hz]), from_sympy(x + y + z)) - self.assertEqual(to_sympy(hx + hy + hz), x + y + z) - self.assertEqual(to_sympy(hsum([hx, hy, hz])), x + y + z) - self.assertEqual(to_sympy(hsum([hx])), x) - self.assertEqual(to_sympy(hsum([])), 0.0) - self.assertEqual( - hsum([ha, hb, hc, hx, hy, hz]), from_sympy(x + y + z + a + b + c) - ) - self.assertEqual(to_sympy(ha + hb + hc + hx + hy + hz), x + y + z + a + b + c) - self.assertEqual( - to_sympy(hsum([ha, hb, hc, hx, hy, hz])), x + y + z + a + b + c - ) - - self.assertEqual(hx * hy * hz, from_sympy(x * y * z)) - self.assertEqual(to_sympy(hx * hy * hz), x * y * z) - self.assertEqual( - (ha * hb) * (hc * hx) * (hy * hz), from_sympy(x * y * z * a * b * c) - ) - self.assertEqual(to_sympy(ha * hb * hc * hx * hy * hz), x * y * z * a * b * c) - - self.assertEqual(hsum([hx, -1.0 * hy, -1.0 * hz]), from_sympy(x - y - z)) - self.assertEqual(to_sympy(hx - hy - hz), x - y - z) - - # Run a test in the vector form as well. 
- self.assertEqual(to_sympy([hx - hy - hz, hx * hy * hz]), [x - y - z, x * y * z]) - - self.assertEqual(hx * hz**-1.0, from_sympy(x / z)) - self.assertEqual(to_sympy(hx / hz), x / z) - - self.assertEqual( - core.kepE(hx, hy), from_sympy(spy.Function("heyoka_kepE")(x, y)) - ) - self.assertEqual(to_sympy(core.kepE(hx, hy)), spy.Function("heyoka_kepE")(x, y)) - - self.assertEqual(-1.0 * hx, from_sympy(-x)) - self.assertEqual(to_sympy(-hx), -x) - - self.assertEqual(to_sympy(core.sigmoid(hx + hy)), 1.0 / (1.0 + spy.exp(-x - y))) - - self.assertEqual(core.time, from_sympy(spy.Function("heyoka_time")())) - self.assertEqual(to_sympy(core.time), spy.Function("heyoka_time")()) - - with self.assertRaises(TypeError) as cm: - from_sympy(abs(x)) - self.assertTrue("Unable to convert the sympy object" in str(cm.exception)) - - # Test caching behaviour. - foo = hx + hy - bar = foo / (foo * hz + 1.0) - bar_spy = to_sympy(bar) - self.assertEqual( - id(bar_spy.args[1]), id(bar_spy.args[0].args[0].args[1].args[1]) - ) - - # pi constant. - self.assertEqual(to_sympy(pi), spy.pi) - self.assertEqual(from_sympy(spy.pi), pi) - self.assertEqual(to_sympy(from_sympy(spy.pi)), spy.pi) - - # nbody helper. - [to_sympy(_[1]) for _ in nbody(2)] - [to_sympy(_[1]) for _ in nbody(4)] - [to_sympy(_[1]) for _ in nbody(10)] - - class llvm_state_test_case(_ut.TestCase): def test_copy(self): from . import make_vars, sin, taylor_adaptive @@ -2913,6 +2574,8 @@ def run_test_suite(): _test_batch_integrator, _test_ensemble, _test_memcache, + _test_celmec, + _test_sympy, ) import numpy as np from .model import nbody @@ -2947,8 +2610,10 @@ def run_test_suite(): suite.addTest(tl.loadTestsFromTestCase(llvm_state_test_case)) suite.addTest(tl.loadTestsFromTestCase(event_classes_test_case)) suite.addTest(tl.loadTestsFromTestCase(event_detection_test_case)) - suite.addTest(tl.loadTestsFromTestCase(kepE_test_case)) - suite.addTest(tl.loadTestsFromTestCase(sympy_test_case)) + suite.addTest(tl.loadTestsFromTestCase(_test_celmec.kepE_test_case)) + suite.addTest(tl.loadTestsFromTestCase(_test_celmec.kepF_test_case)) + suite.addTest(tl.loadTestsFromTestCase(_test_celmec.kepDE_test_case)) + suite.addTest(tl.loadTestsFromTestCase(_test_sympy.sympy_test_case)) suite.addTest(tl.loadTestsFromTestCase(_test_memcache.memcache_test_case)) test_result = _ut.TextTestRunner(verbosity=2).run(suite) From d3433721dc913e48161cbbfb7307d5dd0401064e Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 22 Oct 2023 16:13:17 +0200 Subject: [PATCH 2/7] Smal doc update. --- doc/notebooks/vsop2013.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/notebooks/vsop2013.ipynb b/doc/notebooks/vsop2013.ipynb index 14be61be..74616860 100644 --- a/doc/notebooks/vsop2013.ipynb +++ b/doc/notebooks/vsop2013.ipynb @@ -210,7 +210,7 @@ "\n", "A possible issue in astrodynamical applications is that VSOP2013 does not directly provide formulae for the state vector of the Sun. The barycentric position of the Sun can be estimated via the heliocentric planetary formulae after imposing that the system's barycentre is fixed in the origin. 
It is not clear at this time if this approach is 100% accurate (as VSOP2013 includes perturbations on the planets by asteroids, whose state vectors are not provided in the solution), but it should be adequate for most practical purposes.\n", "\n", - "A better alternative is to formulate the dynamical equations of motion (e.g., of a spacecraft or a small body) in a heliocentric reference frame (i.e., in the framework of the so-called (N+1)-body problem)." + "Another option is to formulate the dynamical equations of motion (e.g., of a spacecraft or a small body) in a heliocentric reference frame (i.e., in the framework of the so-called (N+1)-body problem)." ] } ], From f3ff143ef28e2cf7f7a30211c0d6b266b36a9bbc Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Mon, 30 Oct 2023 19:07:43 +0100 Subject: [PATCH 3/7] Expose and test the gradient/jacobian properties. --- heyoka/_test_dtens.py | 29 +++++++++++++++++++++++++++++ heyoka/expose_expression.cpp | 9 +++++++++ 2 files changed, 38 insertions(+) diff --git a/heyoka/_test_dtens.py b/heyoka/_test_dtens.py index a1856212..30c5507a 100644 --- a/heyoka/_test_dtens.py +++ b/heyoka/_test_dtens.py @@ -10,6 +10,35 @@ class dtens_test_case(_ut.TestCase): + def test_gradient(self): + from . import diff_tensors, make_vars, expression as ex + + x, y = make_vars("x", "y") + + dt = diff_tensors([x - y]) + self.assertEqual(dt.gradient, [ex(1.0), ex(-1.0)]) + + def test_jacobian(self): + from . import diff_tensors, make_vars, expression as ex + import numpy as np + + x, y = make_vars("x", "y") + + dt = diff_tensors([x - y, -x + y]) + self.assertTrue( + np.all(dt.jacobian == np.array([[ex(1.0), ex(-1.0)], [ex(-1.0), ex(1.0)]])) + ) + + dt = diff_tensors([x - y, -x + y, -x - y]) + self.assertTrue( + np.all( + dt.jacobian + == np.array( + [[ex(1.0), ex(-1.0)], [ex(-1.0), ex(1.0)], [ex(-1.0), ex(-1.0)]] + ) + ) + ) + def test_basic(self): from . import dtens from sys import getrefcount diff --git a/heyoka/expose_expression.cpp b/heyoka/expose_expression.cpp index 2978b561..8e31c607 100644 --- a/heyoka/expose_expression.cpp +++ b/heyoka/expose_expression.cpp @@ -506,6 +506,15 @@ void expose_expression(py::module_ &m) return std::vector(sr.begin(), sr.end()); }, "diff_order"_a, "component"_a = py::none{}); + // Gradient. + dtens_cl.def_property_readonly("gradient", &hey::dtens::get_gradient); + // Jacobian. + dtens_cl.def_property_readonly("jacobian", [](const hey::dtens &dt) { + auto jac = py::array(py::cast(dt.get_jacobian())); + + return jac.reshape(py::array::ShapeContainer{boost::numeric_cast(dt.get_nouts()), + boost::numeric_cast(dt.get_nvars())}); + }); // Copy/deepcopy. dtens_cl.def("__copy__", copy_wrapper); dtens_cl.def("__deepcopy__", deepcopy_wrapper, "memo"_a); From 48fc77ea2a97f3ea2e5296b61036377eb38316c4 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 31 Oct 2023 08:51:19 +0100 Subject: [PATCH 4/7] Sphinx fix. --- doc/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index 1a9912ae..f56c2290 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -75,6 +75,8 @@ "binderhub_url": "https://mybinder.org", "notebook_interface": "jupyterlab", }, + # See: https://github.com/pydata/pydata-sphinx-theme/issues/1492 + "navigation_with_keys": False, } nb_execution_mode = "force" From b20e65bf5810e8430322beda1ac4c84e23130a4a Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 31 Oct 2023 11:02:17 +0100 Subject: [PATCH 5/7] Add notebook for Lagrange propagation. 
--- doc/examples.rst | 1 + doc/notebooks/ex_system_revisited.ipynb | 2 + doc/notebooks/lagrangian_propagator.ipynb | 485 ++++++++++++++++++++++ 3 files changed, 488 insertions(+) create mode 100644 doc/notebooks/lagrangian_propagator.ipynb diff --git a/doc/examples.rst b/doc/examples.rst index 537b48b4..b3cbedad 100644 --- a/doc/examples.rst +++ b/doc/examples.rst @@ -42,6 +42,7 @@ Celestial mechanics and astrodynamics notebooks/ttv notebooks/vsop2013 notebooks/tides_spokes + notebooks/lagrangian_propagator Event detection --------------- diff --git a/doc/notebooks/ex_system_revisited.ipynb b/doc/notebooks/ex_system_revisited.ipynb index 59eea554..2aaa8346 100644 --- a/doc/notebooks/ex_system_revisited.ipynb +++ b/doc/notebooks/ex_system_revisited.ipynb @@ -863,6 +863,8 @@ "id": "b19b9377-a4d3-49f7-a341-1a290650dd79", "metadata": {}, "source": [ + "(computing_derivatives)=\n", + "\n", "## Computing derivatives\n", "\n", "heyoka.py provides two ways of computing the derivatives of an expression. The first one is the ``diff()`` function:" diff --git a/doc/notebooks/lagrangian_propagator.ipynb b/doc/notebooks/lagrangian_propagator.ipynb new file mode 100644 index 00000000..0a0f92ed --- /dev/null +++ b/doc/notebooks/lagrangian_propagator.ipynb @@ -0,0 +1,485 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c4c78436-2cfa-4b10-ac2a-58e17aaf85d7", + "metadata": {}, + "source": [ + "# Lagrange propagation and the state transition matrix\n", + "\n", + "In the gravitational [two-body problem](https://en.wikipedia.org/wiki/Two-body_problem) it is possible to compute the state of the system at an arbitrary time from an initial state $\\left( \\boldsymbol{r}_0, \\boldsymbol{v}_0 \\right)$ via the so-called Lagrange coefficients $F$, $G$, $F_t$ and $G_t$:\n", + "\n", + "\\begin{equation}\n", + "\\begin{cases}\n", + "\\boldsymbol{r} & = F \\boldsymbol{r}_0 + G \\boldsymbol{v}_0 \\\\\n", + "\\boldsymbol{v} & = F_t \\boldsymbol{r}_0 + G_t \\boldsymbol{v}_0\n", + "\\end{cases}.\n", + "\\end{equation}\n", + "\n", + "Analytical expressions for these coefficients are available in terms of anomalies differences between the end state and the initial state. The details on these analytical expressions and their derivation can be found, for example, in the seminal book by Richard Battin [\"An introduction to the mathematics and methods of astrodynamics\"](https://ui.adsabs.harvard.edu/abs/1987aiaa.rept.....B/abstract) (section 4.3). See [here](https://orbital-mechanics.space/time-since-periapsis-and-keplers-equation/the-lagrange-coefficients.html) for another derivation.\n", + "\n", + "In this notebook, we will first show how to implement a Lagrange propagator using heyoka.py's expression system. 
Because the propagator will be implemented in terms of analytical formulae, we will then be able to differentiate it and effortlessly construct the state transtition matrix (i.e., the Jacobian of the propagator with respect to the initial state).\n", + "\n", + "## The Lagrange propagator\n", + "\n", + "```{note}\n", + "\n", + "The propagator presented here is implemented in terms of eccentric anomalies, and thus it is limited to elliptic orbits.\n", + "```\n", + "\n", + "We begin by introducing the symbolic variables corresponding to the inputs of the propagator:\n", + "\n", + "- the initial Cartesian position $\\boldsymbol{r}_0=\\left( x_0, y_0, z_0 \\right)$,\n", + "- the initial Cartesian velocity $\\boldsymbol{v}_0=\\left( v_{x0}, v_{y0}, v_{z0} \\right)$,\n", + "- the gravitational parameter of the system $\\mu$,\n", + "- the propagation time $t$." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "90a93d15-3a4f-4408-85dc-dc9917e33dba", + "metadata": {}, + "outputs": [], + "source": [ + "import heyoka as hy\n", + "import numpy as np\n", + "\n", + "x0, y0, z0 = hy.make_vars(\"x0\", \"y0\", \"z0\")\n", + "vx0, vy0, vz0 = hy.make_vars(\"vx0\", \"vy0\", \"vz0\")\n", + "mu, tm = hy.make_vars(\"mu\", \"t\")\n", + "\n", + "# Package initial position and velocity into\n", + "# arrays for later use.\n", + "pos_0 = np.array([x0, y0, z0])\n", + "vel_0 = np.array([vx0, vy0, vz0])" + ] + }, + { + "cell_type": "markdown", + "id": "9474f6df-0b83-4d38-8030-eeb1db3b5f0d", + "metadata": {}, + "source": [ + "Next, we compute the semi-major axis $a$ from the [specific orbital energy](https://en.wikipedia.org/wiki/Specific_orbital_energy) $\\epsilon$:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "ed7a873f-4bc6-4b5b-bb04-e813c017838a", + "metadata": {}, + "outputs": [], + "source": [ + "v02 = vx0**2+vy0**2+vz0**2\n", + "r0 = hy.sqrt(x0**2+y0**2+z0**2)\n", + "eps = v02 * 0.5 - mu/r0\n", + "a = -mu/(2.*eps)" + ] + }, + { + "cell_type": "markdown", + "id": "259225a2-ed98-4a2e-acf0-37a262c63aec", + "metadata": {}, + "source": [ + "Now we compute the quantities $\\sigma_0=\\frac{\\boldsymbol{r}_0 \\cdot \\boldsymbol{v}_0}{\\sqrt{\\mu}}$, $s_0=\\frac{\\sigma_0}{\\sqrt{a}}$ and $c_0=1-\\frac{r_0}{a}$:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "40295794-9b1e-4719-9e97-1ce1bbc96fe9", + "metadata": {}, + "outputs": [], + "source": [ + "sigma0 = np.dot(pos_0, vel_0) / hy.sqrt(mu)\n", + "s0 = sigma0 / hy.sqrt(a)\n", + "c0 = 1. 
- r0 / a" + ] + }, + { + "cell_type": "markdown", + "id": "675933ae-bcf7-4951-87a8-2e1d8dc3b6c8", + "metadata": {}, + "source": [ + "We can now calculate the difference in mean anomaly $\\Delta M$ from the mean motion $n$ and the propagation time $t$," + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5a2fc25b-b346-440c-b3ea-4e32a1d5c0bd", + "metadata": {}, + "outputs": [], + "source": [ + "n = hy.sqrt(mu / (a * a * a))\n", + "DM = n*tm" + ] + }, + { + "cell_type": "markdown", + "id": "242b2f6e-ab3e-41d9-bd15-bdd903549927", + "metadata": {}, + "source": [ + "and then proceed to convert it to a difference in eccentric anomaly $\\Delta E$ via the ``kepDE()`` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "860b4070-f0e3-4be8-af72-118987efe45c", + "metadata": {}, + "outputs": [], + "source": [ + "DE = hy.kepDE(s0, c0, DM)\n", + "\n", + "# Compute cos(DE) and sin(DE).\n", + "cDE = hy.cos(DE)\n", + "sDE = hy.sin(DE)" + ] + }, + { + "cell_type": "markdown", + "id": "69d193e4-6651-4b6d-83c9-5a66fbe707c8", + "metadata": {}, + "source": [ + "We can now calculate $r(t)$," + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "abb11aab-6eb0-4077-b74e-75267ae9bb5c", + "metadata": {}, + "outputs": [], + "source": [ + "r = a+(r0-a)*cDE+sigma0*hy.sqrt(a)*sDE" + ] + }, + { + "cell_type": "markdown", + "id": "cc867251-a10b-4651-9476-d5770e1eb849", + "metadata": {}, + "source": [ + "and the Lagrange coefficients $F$, $G$, $F_t$ and $G_t$:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "0b018d77-4e19-4df0-84bb-4f70583315f5", + "metadata": {}, + "outputs": [], + "source": [ + "F = 1. - a/r0*(1. - cDE)\n", + "G = a*sigma0/hy.sqrt(mu)*(1. - cDE) + r0*hy.sqrt(a/mu)*sDE\n", + "Ft = -hy.sqrt(mu*a)/(r*r0)*sDE\n", + "Gt = 1-a/r*(1.-cDE)" + ] + }, + { + "cell_type": "markdown", + "id": "fe0bf65b-e3b4-4404-97f6-cf17569ce721", + "metadata": {}, + "source": [ + "Finally, we can calculate the position and velocity vectors at time $t$:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "95c178dd-20dc-42b8-8b56-8b09c8ec7840", + "metadata": {}, + "outputs": [], + "source": [ + "pos = F*pos_0 + G*vel_0\n", + "vel = Ft*pos_0 + Gt*vel_0\n", + "\n", + "# Concatenate position and velocity\n", + "# into a single state vector.\n", + "pos_vel = np.hstack([pos, vel])" + ] + }, + { + "cell_type": "markdown", + "id": "78d7802f-4a9d-4f47-867c-ace54bc20ea9", + "metadata": {}, + "source": [ + "We can now proceed to create a [compiled function](<./compiled_functions.ipynb>) for the evaluation of the state vector at time $t$:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d508036c-b678-41f4-9e91-6882079859f4", + "metadata": {}, + "outputs": [], + "source": [ + "cf = hy.make_cfunc(pos_vel,\n", + " # Specify the order in which the input\n", + " # variables are passed to the compiled\n", + " # function.\n", + " vars=[x0, y0, z0, vx0, vy0, vz0, mu, tm])" + ] + }, + { + "cell_type": "markdown", + "id": "56b81ce7-3be7-45ec-ab1d-a1324bb0415e", + "metadata": {}, + "source": [ + "We can now run a quick test for our propagator. 
We set up a circular orbit with $\\boldsymbol{r}_0=\\left( 1, 0, 0 \\right)$, $\\boldsymbol{v}_0=\\left( 0, 1, 0 \\right)$ and $\\mu = 1$, and we ask for the state vector at $t=\\pi$ (i.e., half period):" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f468c825-783d-4138-b3f6-6c17f1134321", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([-1.0000000e+00, 1.2246468e-16, 0.0000000e+00, -1.2246468e-16,\n", + " -1.0000000e+00, -0.0000000e+00])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cf([\n", + " # r0.\n", + " 1., 0., 0.,\n", + " # v0.\n", + " 0., 1., 0.,\n", + " # mu and t.\n", + " 1., np.pi])" + ] + }, + { + "cell_type": "markdown", + "id": "1c04b904-68d5-4beb-a121-df60624f89d7", + "metadata": {}, + "source": [ + "Indeed, as expected, $\\boldsymbol{r}\\left(\\pi\\right) = \\left( -1, 0, 0 \\right)$ and $\\boldsymbol{v}\\left(\\pi\\right) = \\left( 0, -1, 0 \\right)$ (plus/minus epsilon).\n", + "\n", + "Recall from the [compiled functions tutorial](<./compiled_functions.ipynb>) that the propagator is fully vectorised, and that it takes advantage of SIMD instructions. For instance, on a modern x86 machine we can propagate four different trajectories at the cost of one single propagation by passing in a two-dimensional matrix of initial conditions like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "0035d29f-753b-4f5e-a10f-93c552c390b6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-1.00000000e+00, -1.03804031e+00, -1.05314825e+00,\n", + " -1.04768800e+00],\n", + " [ 1.22464680e-16, 1.88508928e-01, 3.79275881e-01,\n", + " 5.68044550e-01],\n", + " [ 0.00000000e+00, -8.32873901e-03, -1.29590841e-02,\n", + " -1.35748147e-02],\n", + " [-1.22464680e-16, -1.56913323e-01, -2.92251543e-01,\n", + " -4.03032933e-01],\n", + " [-1.00000000e+00, -9.54125221e-01, -8.82265186e-01,\n", + " -7.93231703e-01],\n", + " [-0.00000000e+00, -1.08925347e-02, -2.25868602e-02,\n", + " -3.38565463e-02]])" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cf([\n", + " # x0.\n", + " [1., 1.01, 1.02, 1.03],\n", + " # y0.\n", + " [0., 0.01, 0.02, 0.03],\n", + " # z0.\n", + " [0., 0.01, 0.02, 0.03],\n", + " # vx0.\n", + " [0., 0.01, 0.02, 0.03],\n", + " # vy0.\n", + " [1., 1.01, 1.02, 1.03],\n", + " # vz0.\n", + " [0., 0.01, 0.02, 0.03],\n", + " # mu.\n", + " [1., 1.01, 1.02, 1.03],\n", + " # t.\n", + " [np.pi, np.pi+0.01, np.pi+0.02, np.pi+0.03]])" + ] + }, + { + "cell_type": "markdown", + "id": "1ff0b7e3-aae5-457c-832e-8876a66867f6", + "metadata": {}, + "source": [ + "## Constructing the STM\n", + "\n", + "We can now proceed to the construction of an analytical expression for the state transition matrix (STM).\n", + "\n", + "The STM is nothing but the Jacobian of the Lagrange propagator with respect to the initial conditions. 
In order to compute the derivatives, we employ the ``diff_tensors()`` function as explained {ref}`here `:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "7c86e255-8bf8-4938-bae7-c54109df938f", + "metadata": {}, + "outputs": [], + "source": [ + "dt = hy.diff_tensors(pos_vel,\n", + " diff_args=[x0, y0, z0, vx0, vy0, vz0],\n", + " diff_order=1\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "5e4e0c5a-c060-48a6-bc65-29ee83558aef", + "metadata": {}, + "source": [ + "We can then extract the Jacobian from ``dt``," + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "fa4904b2-3703-4eae-a950-576b03dc023a", + "metadata": {}, + "outputs": [], + "source": [ + "jac = dt.jacobian" + ] + }, + { + "cell_type": "markdown", + "id": "d608edfc-021c-4fc7-9f23-6baefb16aae2", + "metadata": {}, + "source": [ + "and proceed to the creation of a compiled function for the numerical evaluation of the STM:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "39d8c8c3-2088-405c-b4a2-c8e1e65b4ff2", + "metadata": {}, + "outputs": [], + "source": [ + "cf_stm = hy.make_cfunc(jac.flatten(),\n", + " # Specify the order in which the input\n", + " # variables are passed to the compiled\n", + " # function.\n", + " vars=[x0, y0, z0, vx0, vy0, vz0, mu, tm])" + ] + }, + { + "cell_type": "markdown", + "id": "9b90449b-87cf-4131-b930-6ab94b8aa932", + "metadata": {}, + "source": [ + "Let us take a look at the STM for our test circular orbit:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "da100f5c-b555-486e-a455-694132057eba", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-3.00000000e+00, 2.44929360e-16, 0.00000000e+00,\n", + " 3.67394040e-16, -4.00000000e+00, 0.00000000e+00],\n", + " [ 9.42477796e+00, 3.00000000e+00, 0.00000000e+00,\n", + " 4.00000000e+00, 9.42477796e+00, 0.00000000e+00],\n", + " [ 0.00000000e+00, 0.00000000e+00, -1.00000000e+00,\n", + " 0.00000000e+00, 0.00000000e+00, 1.22464680e-16],\n", + " [-9.42477796e+00, -2.00000000e+00, 0.00000000e+00,\n", + " -3.00000000e+00, -9.42477796e+00, 0.00000000e+00],\n", + " [ 2.00000000e+00, 3.67394040e-16, 0.00000000e+00,\n", + " 4.89858720e-16, 3.00000000e+00, 0.00000000e+00],\n", + " [ 0.00000000e+00, 0.00000000e+00, -1.22464680e-16,\n", + " 0.00000000e+00, 0.00000000e+00, -1.00000000e+00]])" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cf_stm([1., 0., 0., 0., 1., 0., 1., np.pi]).reshape((6,-1))" + ] + }, + { + "cell_type": "markdown", + "id": "8c1d0775-7eb2-40c8-b7f5-89169638975d", + "metadata": {}, + "source": [ + "We can notice that the top-left element of the STM is the value $-3$. 
If we slightly perturb by $10^{-5}$ the value of $x_0$ and re-evaluate the state of the system at $t=\\pi$,"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "id": "b05667c0-95d1-43ce-8f12-e8b6f4595581",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([-1.00003000e+00,  9.42473082e-05,  0.00000000e+00, -9.42435384e-05,\n",
+       "       -9.99979996e-01, -0.00000000e+00])"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cf([1 + 1e-5, 0., 0., 0., 1., 0., 1., np.pi])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c9b02e7c-2f12-4e63-b631-88ced911e6be",
+   "metadata": {},
+   "source": [
+    "we can indeed see that the initial perturbation of $10^{-5}$ has been amplified by a factor of $3$ in the final state of $x$, as predicted by the STM.\n",
+    "\n",
+    "Like the Lagrange propagator, the compiled function for the STM is also fully vectorised and SIMD-enabled. Note that it is also possible via ``diff_tensors()`` to compute not only the Jacobian, but also higher-order tensors of derivatives (e.g., the Hessians)."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

From a639eb23bd9ad97fb1e6ada698ddc4483fa359dd Mon Sep 17 00:00:00 2001
From: Francesco Biscani 
Date: Tue, 31 Oct 2023 11:26:56 +0100
Subject: [PATCH 6/7] Small tutorial update.

---
 doc/notebooks/ex_system_revisited.ipynb | 55 ++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 11 deletions(-)

diff --git a/doc/notebooks/ex_system_revisited.ipynb b/doc/notebooks/ex_system_revisited.ipynb
index 2aaa8346..6c35540f 100644
--- a/doc/notebooks/ex_system_revisited.ipynb
+++ b/doc/notebooks/ex_system_revisited.ipynb
@@ -1140,6 +1140,39 @@
     "dt.get_derivatives(diff_order=1, component=0)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "9aaae8d3-8f36-4757-b9f6-982533c57483",
+   "metadata": {},
+   "source": [
+    "Starting with heyoka.py 3.1, the convenience properties ``gradient`` and ``jacobian`` can be used to access the gradient and Jacobian from a ``dtens`` object. 
For instance:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "e962103f-8a9d-4a3a-88c8-854beb9e0c2b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[1.0000000000000000, {({z} * {-sin((y * z))})},\n", + " {({y} * {-sin((y * z))})}],\n", + " [{exp((x - z))}, {y**-1.0000000000000000}, {-{exp((x - z))}}]],\n", + " dtype=object)" + ] + }, + "execution_count": 39, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Fetch from dt the Jacobian as a 2D array.\n", + "dt.jacobian" + ] + }, { "cell_type": "markdown", "id": "19457ff7-1b91-40e4-9c74-75ca3f8ad39b", @@ -1166,7 +1199,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 40, "id": "14912d7e-50ce-4831-978d-d207d8cf0b49", "metadata": {}, "outputs": [ @@ -1176,7 +1209,7 @@ "(x_1 * x_2 * x_3 * x_4 * x_5 * x_6 * x_7 * x_8)" ] }, - "execution_count": 39, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } @@ -1197,7 +1230,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 41, "id": "517f3c2d-b2a9-4fb7-b646-2bd5fa785772", "metadata": {}, "outputs": [ @@ -1214,7 +1247,7 @@ " (x_1 * x_2 * x_3 * x_4 * x_5 * x_6 * x_7)]" ] }, - "execution_count": 40, + "execution_count": 41, "metadata": {}, "output_type": "execute_result" } @@ -1236,7 +1269,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 42, "id": "eb05a503-123c-44c0-91c3-3a3c9022f1e3", "metadata": {}, "outputs": [ @@ -1253,7 +1286,7 @@ " (u_0 * u_1 * u_2 * u_3 * u_4 * u_5 * u_7)]" ] }, - "execution_count": 41, + "execution_count": 42, "metadata": {}, "output_type": "execute_result" } @@ -1273,7 +1306,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 43, "id": "e6bd5955-c51a-4a9e-b077-ba80ffa30d63", "metadata": {}, "outputs": [ @@ -1290,7 +1323,7 @@ " {({x_7} * {({(x_5 * x_6)} * {((x_1 * x_2) * (x_3 * x_4))})})}]" ] }, - "execution_count": 42, + "execution_count": 43, "metadata": {}, "output_type": "execute_result" } @@ -1311,7 +1344,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 44, "id": "baab73a4-7e7b-40dc-a76a-4696e49278b6", "metadata": {}, "outputs": [ @@ -1338,7 +1371,7 @@ " (u_2 * u_17)]" ] }, - "execution_count": 43, + "execution_count": 44, "metadata": {}, "output_type": "execute_result" } @@ -1358,7 +1391,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 45, "id": "48690c49-990b-4fe5-8ffb-77ee80ad9d3a", "metadata": {}, "outputs": [ From a668fb03599b9281af782c48879372ea211a0601 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 31 Oct 2023 12:03:43 +0100 Subject: [PATCH 7/7] Update changelog, update docs/scripts for heyoka 3.1. --- doc/changelog.rst | 14 ++++++++++++++ doc/install.rst | 2 +- tools/gha_manylinux.sh | 2 +- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 37d4cfb8..1883996d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,20 @@ Changelog 3.1.0 (unreleased) ------------------ +New +~~~ + +- Implement the eccentric longitude :math:`F` in the expression + system (`#140 `__). +- Implement the delta eccentric anomaly :math:`\Delta E` in the expression + system (`#140 `__). + Taylor derivatives are not implemented yet. +- Implement convenience properties to fetch the gradient/Jacobian + from a ``dtens`` object + (`#140 `__). +- New example notebook implementing Lagrange propagation + (`#140 `__). 
+ Changes ~~~~~~~ diff --git a/doc/install.rst b/doc/install.rst index 544509a2..b78ba70d 100644 --- a/doc/install.rst +++ b/doc/install.rst @@ -9,7 +9,7 @@ Dependencies heyoka.py has several Python and C++ dependencies. On the C++ side, heyoka.py depends on: * the `heyoka C++ library `__, - version 3.0.x (**mandatory**), + version 3.1.x (**mandatory**), * the `Boost `__ C++ libraries (**mandatory**), * the `{fmt} `__ library (**mandatory**), * the `TBB `__ library (**mandatory**), diff --git a/tools/gha_manylinux.sh b/tools/gha_manylinux.sh index 32834556..1319966f 100644 --- a/tools/gha_manylinux.sh +++ b/tools/gha_manylinux.sh @@ -38,7 +38,7 @@ echo "PYTHON_DIR: ${PYTHON_DIR}" export NUMPY_VERSION="1.24.*" # The heyoka version to be used for releases. -export HEYOKA_VERSION_RELEASE="3.0.0" +export HEYOKA_VERSION_RELEASE="3.1.0" # Check if this is a release build. if [[ "${GITHUB_REF}" == "refs/tags/v"* ]]; then
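
For reference, a minimal usage sketch of the functionality introduced in this series (the kepF()/kepDE() bindings and the gradient/jacobian properties of dtens). This is an illustrative example rather than part of the patches, and it assumes heyoka.py has been built against heyoka C++ 3.1 with the series applied:

    import heyoka as hy

    x, y, z = hy.make_vars("x", "y", "z")

    # kepF()/kepDE() accept a mix of expressions and numerical arguments,
    # provided at least one argument is an expression and all numerical
    # arguments are of the same type.
    F = hy.kepF(0.1, 0.2, z)
    DE = hy.kepDE(x, y, 0.3)

    # The new convenience properties on dtens.
    dt = hy.diff_tensors([x * y * z], diff_args=[x, y, z], diff_order=1)
    print(dt.gradient)   # list of the three first-order derivatives

    dt2 = hy.diff_tensors([x - y, x * y], diff_args=[x, y], diff_order=1)
    print(dt2.jacobian)  # 2x2 NumPy array of expressions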