From b5f684b49bc1105c6e81cedcb6a8ff060b748e12 Mon Sep 17 00:00:00 2001
From: Luca Fiorito
Date: Wed, 21 Aug 2024 13:29:04 +0200
Subject: [PATCH] update installation scripts (#331)

* update

* update

* update

* update

* update

* update

---------

Co-authored-by: Luca Fiorito
---
 .github/workflows/test_notebooks.yml         |  45 +++---
 .github/workflows/unit_test.yml              |  62 +++++---
 notebooks/notebook_sampling_decay_data.ipynb | 152 +++----------------
 requirements.txt                             |   5 -
 requirements_conda.txt                       |  14 --
 sandy/__init__.py                            |   1 +
 sandy/core/cov.py                            |   4 +-
 sandy/core/endf6.py                          |   6 +
 sandy/core/records.py                        |  17 +--
 sandy/fy.py                                  |  49 +++---
 sandy/sections/mf32.py                       |  26 ++--
 setup.cfg                                    |  23 ++-
 setup.py                                     |   4 -
 13 files changed, 159 insertions(+), 249 deletions(-)
 delete mode 100755 requirements.txt
 delete mode 100644 requirements_conda.txt
 delete mode 100755 setup.py

diff --git a/.github/workflows/test_notebooks.yml b/.github/workflows/test_notebooks.yml
index 6b812412..8b97e944 100644
--- a/.github/workflows/test_notebooks.yml
+++ b/.github/workflows/test_notebooks.yml
@@ -9,46 +9,53 @@ on:
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:
 
+# Environment variables accessible to all jobs
 env:
   NJOY: ${GITHUB_WORKSPACE}/NJOY2016/build/njoy
   GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+# Define the jobs to be run
 jobs:
   build:
+    # Use the latest Ubuntu environment
     runs-on: ubuntu-latest
 
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      # Check out the repository to the GitHub workspace
       - uses: actions/checkout@v3
-        with:
-          # The branch, tag or SHA to checkout. When checking out the repository that
-          # triggered a workflow, this defaults to the reference or SHA for that event.
-          # Otherwise, defaults to `master`.
-          ref: 'v1.1'
+
+      # Set up the latest Python 3 version
       - uses: actions/setup-python@v3
         with:
           python-version: "3.x"
 
-      # Run commands using the runners shell
-      - name: clone and install njoy
-        run: git clone https://github.com/njoy/NJOY2016.git && (cd NJOY2016 && mkdir build && cd build && cmake -DPython3_EXECUTABLE=$(which python3) .. && make)
-      - name: remove njoy tests from unit testing
+      # Clone and build NJOY2016
+      - name: Clone and build NJOY2016
+        run: |
+          git clone https://github.com/njoy/NJOY2016.git
+          cd NJOY2016
+          mkdir build
+          cd build
+          cmake -DPython3_EXECUTABLE=$(which python3) ..
+          make
+
+      # Remove NJOY2016 tests from the repository to avoid conflicts with SANDY tests
+      - name: Remove NJOY2016 tests
         run: rm -rf NJOY2016/tests
-      - name: install general python dependencies
-        run: pip install sphinx sphinx_rtd_theme codecov numpydoc coveralls pytest-cov nbval setuptools
-      - name: install sandy's requirements
-        run: pip install -r requirements.txt
-      - name: install jupyter packages to run notebooks
-        run: pip install jupyterlab matplotlib seaborn scikit-learn serpentTools
-      - name: install sandy
-        run: python setup.py install --user
+
+      # Install the sandy package
+      - name: Install sandy
+        run: pip install --no-cache-dir --user .[test,notebook,doc]
+
+      # Run all the jupyter notebooks
       - name: run notebooks
         run: |
           cd notebooks
           mkdir executed_notebooks
           jupyter nbconvert --to notebook --execute *.ipynb --ExecutePreprocessor.kernel_name='python3' --inplace
           mv *.ipynb executed_notebooks/
+
+      # Push rendered notebooks to sandy_notebooks page
       - name: Pushes to another repository
         id: push_directory
         uses: cpina/github-action-push-to-another-repository@main
diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml
index d689bacf..46d969aa 100644
--- a/.github/workflows/unit_test.yml
+++ b/.github/workflows/unit_test.yml
@@ -1,46 +1,64 @@
-# This is a basic workflow to help you get started with Actions
+# CI workflow for automated testing and documentation generation
 name: CI
 
 # Controls when the workflow will run
 on:
-  # Triggers the workflow on push or pull request events but only for the develop branch
+  # Triggers the workflow on push or pull request events for the develop and v1.1 branches
   push:
-    branches: [ develop, v1.1 ]
+    branches:
+      - develop
+      - v1.1
   pull_request:
-    branches: [ develop, v1.1 ]
+    branches:
+      - develop
+      - v1.1
 
-  # Allows you to run this workflow manually from the Actions tab
+  # Allows manual triggering of the workflow from the Actions tab
   workflow_dispatch:
 
+# Environment variables accessible to all jobs
 env:
-  NJOY: ${GITHUB_WORKSPACE}/NJOY2016/build/njoy
+  NJOY: ${{ github.workspace }}/NJOY2016/build/njoy
   GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+# Define the jobs to be run
 jobs:
   build:
+    # Use the latest Ubuntu environment
     runs-on: ubuntu-latest
 
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      # Check out the repository to the GitHub workspace
       - uses: actions/checkout@v3
-      - uses: "actions/setup-python@v3"
+
+      # Set up the latest Python 3 version
+      - uses: actions/setup-python@v3
         with:
-          python-version: "3.7.15"
+          python-version: "3.x"
+
+      # Clone and build NJOY2016
+      - name: Clone and build NJOY2016
+        run: |
+          git clone https://github.com/njoy/NJOY2016.git
+          cd NJOY2016
+          mkdir build
+          cd build
+          cmake -DPython3_EXECUTABLE=$(which python3) ..
+          make
 
-      # Run commands using the runners shell
-      - name: clone and install njoy
-        run: git clone https://github.com/njoy/NJOY2016.git && (cd NJOY2016 && mkdir build && cd build && cmake -DPython3_EXECUTABLE=$(which python3) .. && make)
-      - name: remove njoy tests from unit testing
+      # Remove NJOY2016 tests from the repository to avoid conflicts with SANDY tests
+      - name: Remove NJOY2016 tests
         run: rm -rf NJOY2016/tests
-      - name: install general python dependencies
-        run: pip install sphinx sphinx_rtd_theme codecov numpydoc coveralls pytest-cov nbval
-      - name: install sandy's requirements
-        run: pip install -r requirements.txt
-      - name: install sandy
-        run: python setup.py install --user
-      - name: pytest
+
+      # Install the sandy package
+      - name: Install sandy
+        run: pip install --no-cache-dir --user .[test,doc]
+
+      # Run tests with coverage reporting
+      - name: Run tests with pytest
         run: pytest --cov=sandy sandy
-      - name: make docs
+
+      # Build documentation
+      - name: Build documentation
         run: bash make_docs.sh html
diff --git a/notebooks/notebook_sampling_decay_data.ipynb b/notebooks/notebook_sampling_decay_data.ipynb
index eebf4cdf..eef6f760 100644
--- a/notebooks/notebook_sampling_decay_data.ipynb
+++ b/notebooks/notebook_sampling_decay_data.ipynb
@@ -10,7 +10,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 49,
+   "execution_count": null,
    "id": "5911301f-43c6-4b41-b22e-996b6c9a9594",
    "metadata": {},
    "outputs": [],
@@ -38,7 +38,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
    "id": "5fcc5280-36da-482a-a5a6-88e728bc51ce",
    "metadata": {},
    "outputs": [],
@@ -56,7 +56,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": null,
    "id": "fe661f82-0bad-417b-86f3-c0fae9ae48f5",
    "metadata": {},
    "outputs": [],
@@ -74,21 +74,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": null,
    "id": "3a895b58-7dff-4bb0-bede-e45aaf135b7f",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      ":128: RuntimeWarning: 'sandy.sampling' found in sys.modules after import of package 'sandy', but prior to execution of 'sandy.sampling'; this may result in unpredictable behaviour\n",
-      "INFO: processing file: 'decay_data.jeff33'\n",
-      "INFO: writing to file 'PERT_MF8_MT457.xlsx'...\n",
-      "INFO: Total running time: 0.40 sec\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!python -m sandy.sampling decay_data.jeff33 --samples 3 --processes 1"
    ]
@@ -103,20 +92,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 23,
+   "execution_count": null,
    "id": "226e9dde-1f17-45f0-8550-a7e2dc36c391",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO: processing file: 'decay_data.jeff33'\n",
-      "INFO: writing to file 'PERT_MF8_MT457.xlsx'...\n",
-      "INFO: Total running time: 0.17 sec\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "cli = \"decay_data.jeff33 --samples 3 --processes 1\"\n",
     "sandy.sampling.run(cli.split())"
@@ -132,21 +111,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": null,
    "id": "cc016094-a987-4825-a20e-0ff8e2194b55",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "['decay_data.jeff33', 'decay_data_0', 'decay_data_1', 'decay_data_2']"
-      ]
-     },
-     "execution_count": 24,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "glob.glob(\"decay_data*\")"
    ]
@@ -171,96 +139,40 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 43,
+   "execution_count": null,
    "id": "1f4d66a2-c827-4359-bbc4-72731ab2eae3",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 5.713800+4 1.367220+2          0          0          0          51904 8457    1\n",
-      " 3.26930+18 6.31139+16          0          0          6          01904 8457    2\n",
-      " 3.625050+4 9.621470+2 1.221250+6 1.389240+4 0.000000+0 0.000000+01904 8457    3\n",
-      " 5.000000+0 1.000000+0          0          0         12          21904 8457    4\n",
-      " 2.000000+0 0.000000+0 1.740000+6 3.400000+3 6.520000-1 6.000000-31904 8457    5\n",
-      " 1.000000+0 0.000000+0 1.051700+6 4.000000+3 3.480000-1 6.000000-31904 8457    6\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data.jeff33\").data[(1904, 8, 457)][:(66+15)*6])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 40,
+   "execution_count": null,
    "id": "5565f8c7-ef55-4c1e-a59d-979785759f58",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 57138.0000 136.722000          0          0          0          51904 8457    1\n",
-      " 3.28594+18 6.31139+16          0          0          6          01904 8457    2\n",
-      " 35181.5427 962.147000 1209406.86 13892.4000 0.00000000 0.000000001904 8457    3\n",
-      " 5.00000000 1.00000000          0          0         12          21904 8457    4\n",
-      " 2.00000000 0.00000000 1740000.00 3400.00000 6.497487-1 6.000000-31904 8457    5\n",
-      " 1.00000000 0.00000000 1051700.00 4000.00000 3.502513-1 6.000000-31904 8457    6\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data_0\").data[(1904, 8, 457)][:(66+15)*6])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 41,
+   "execution_count": null,
    "id": "dc904ea8-531b-4b89-b351-e77964499c33",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 57138.0000 136.722000          0          0          0          51904 8457    1\n",
-      " 3.20214+18 6.31139+16          0          0          6          01904 8457    2\n",
-      " 36241.4375 962.147000 1205306.58 13892.4000 0.00000000 0.000000001904 8457    3\n",
-      " 5.00000000 1.00000000          0          0         12          21904 8457    4\n",
-      " 2.00000000 0.00000000 1740000.00 3400.00000 6.565008-1 6.000000-31904 8457    5\n",
-      " 1.00000000 0.00000000 1051700.00 4000.00000 3.434992-1 6.000000-31904 8457    6\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data_1\").data[(1904, 8, 457)][:(66+15)*6])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 42,
+   "execution_count": null,
    "id": "615fba10-5549-482f-837c-5e1aaf11c5d8",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 57138.0000 136.722000          0          0          0          51904 8457    1\n",
-      " 3.24972+18 6.31139+16          0          0          6          01904 8457    2\n",
-      " 35698.2336 962.147000 1224660.65 13892.4000 0.00000000 0.000000001904 8457    3\n",
-      " 5.00000000 1.00000000          0          0         12          21904 8457    4\n",
-      " 2.00000000 0.00000000 1740000.00 3400.00000 6.514371-1 6.000000-31904 8457    5\n",
-      " 1.00000000 0.00000000 1051700.00 4000.00000 3.485629-1 6.000000-31904 8457    6\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data_2\").data[(1904, 8, 457)][:(66+15)*6])"
    ]
@@ -277,42 +189,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 47,
+   "execution_count": null,
    "id": "e912729b-18cc-49d3-8114-96b2e8ab93c9",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 1.00400+03 3.99320+00          0          0          0          0   5 8457    1\n",
-      " 1.39000-22 1.00000-23          0          0          6          0   5 8457    2\n",
-      " 0.00000+00 0.00000+00 0.00000+00 0.00000+00 2.87890+06 0.00000+00   5 8457    3\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data.jeff33\").data[(5, 8, 457)][:(66+15)*3])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 46,
+   "execution_count": null,
    "id": "b34ef81b-ca11-40ab-bc56-6cd46d38a790",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      " 1004.00000 3.99320000          0          0          0          0   5 8457    1\n",
-      " 1.36726-22 1.00000-23          0          0          6          0   5 8457    2\n",
-      " 0.00000000 0.00000000 0.00000000 0.00000000 3021056.87 0.00000000   5 8457    3\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(sandy.Endf6.from_file(\"decay_data_2\").data[(5, 8, 457)][:(66+15)*3])"
    ]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100755
index bc15f7bc..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-numpy
-pandas<=2.0.3
-pyyaml
-scipy
-openpyxl
diff --git a/requirements_conda.txt b/requirements_conda.txt
deleted file mode 100644
index 14bd012a..00000000
--- a/requirements_conda.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-h5py
-jupyterlab
-matplotlib >= 2.2.23
-numba
-numpy
-numpydoc
-pandas >= 1
-pytest >= 3.3
-pyyaml
-scipy
-sphinx
-seaborn >= 0.9
-statsmodels
-pytables
diff --git a/sandy/__init__.py b/sandy/__init__.py
index 9be9de94..3a0f491a 100755
--- a/sandy/__init__.py
+++ b/sandy/__init__.py
@@ -21,6 +21,7 @@
 from .core import *
 # from .sampling import *  # don't do this
 from .spectra import *
+
 import sandy.mcnp
 import sandy.aleph2
 import sandy.tools
diff --git a/sandy/core/cov.py b/sandy/core/cov.py
index f8b9275e..ea7f33ac 100644
--- a/sandy/core/cov.py
+++ b/sandy/core/cov.py
@@ -661,7 +661,7 @@ def sampling(self, nsmp, seed=None, pdf='lognormal', relative=True,
         return sandy.Samples(samples)
 
     def gls_cov_update(self, S, Vy_extra=None):
-        """
+        r"""
         Perform GLS update for a given covariance matrix, sensitivity and
         covariance matrix of the extra information:
         .. math::
@@ -733,7 +733,7 @@
         return self.__class__(Vx_post)
 
     def sandwich(self, s):
-        """
+        r"""
         Apply the "sandwich formula" to the CategoryCov object for a given
         sensitivity. According with http://dx.doi.org/10.1016/j.anucene.2015.10.027,
         the moment propagation equation is implemented as:
diff --git a/sandy/core/endf6.py b/sandy/core/endf6.py
index cd7d2e8d..c04f39e0 100644
--- a/sandy/core/endf6.py
+++ b/sandy/core/endf6.py
@@ -343,6 +343,12 @@ def get_endf6_file(library, kind, zam, to_file=False):
     >>> tape = sandy.get_endf6_file("jeff_33", 'decay', [380900, 551370, 541350])
     >>> assert type(tape) is sandy.Endf6
 
+    Import all decay data for JEFF-3.3.
+    This test also ensures that the `appendix` module was correctly installed.
+
+    >>> tape = sandy.get_endf6_file("jeff_33", 'decay', 'all')
+    >>> assert type(tape) is sandy.Endf6
+
     Thermal Neutron Scattering Data from ENDF/B-VII.1.
 
     >>> tape = sandy.get_endf6_file("endfb_71", 'tsl', [1, 2, 3])
diff --git a/sandy/core/records.py b/sandy/core/records.py
index 0b5ed277..6745ae51 100644
--- a/sandy/core/records.py
+++ b/sandy/core/records.py
@@ -384,16 +384,13 @@ def line_numbers(length):
 
     Examples
     --------
-    >>> np.array(line_numbers(1.0e6)).max()
-    99999
-    >>> np.array(line_numbers(1.0e6+1)).min()
-    1
-    >>> np.array(line_numbers(1.0e4+1)).max()
-    10001
-    >>> len(sandy.records.line_numbers(1.0e6))
-    1000000
-    >>> len(sandy.records.line_numbers(1.0e6+1))
-    1000001
+    Some tests.
+
+    >>> assert max(line_numbers(1.0e6)) == 99999
+    >>> assert min(line_numbers(1.0e6+1)) == 1
+    >>> assert max(line_numbers(1.0e4+1)) == 10001
+    >>> assert len(line_numbers(1.0e6)) == 1000000
+    >>> assert len(line_numbers(1.0e6+1)) == 1000001
     """
     iend = 1 + length
     ilines = np.tile(np.arange(1, 1e5, dtype=int), int(iend//99999)+1)
diff --git a/sandy/fy.py b/sandy/fy.py
index 85028550..c87f6b66 100644
--- a/sandy/fy.py
+++ b/sandy/fy.py
@@ -1,4 +1,4 @@
-"""
+r"""
 This module contains all classes and functions specific for processing
 fission yield data.
""" @@ -38,7 +38,7 @@ ) def get_chain_yields(): - """ + r""" Import chain yields information from data stored in sandy. The information was taken from 'https://www-nds.iaea.org/endf349/la-ur-94-3106.pdf', page 18-29. @@ -76,7 +76,7 @@ def get_chain_yields(): 'appendix D.txt', 'appendix E.txt', 'appendix F.txt'] # path = join(dirname(__file__), 'appendix', 'chain_yields') - df = pd.concat([pd.read_csv(join(path, file), sep="\s+", index_col=0) for file in files], axis=1) + df = pd.concat([pd.read_csv(join(path, file), sep=r"\s+", index_col=0) for file in files], axis=1) df.columns.name, df.index.name = "ISO", "A" df = df.stack().rename("Y").reset_index("ISO") # @@ -95,7 +95,7 @@ def get_chain_yields(): class Fy(): - """ + r""" Object for fission yield data. Attributes @@ -133,7 +133,7 @@ def __init__(self, df, **kwargs): @property def data(self): - """ + r""" Dataframe of fission yield data with the following columns: - `MAT` : MAT number @@ -166,7 +166,7 @@ def data(self, data): self._data = data[self._columns] def energy_table(self, key, by="ZAM", kind="independent"): - """ + r""" Pivot dataframe of tabulated fission yields as a function of energy. Columns are determined by keyword argument `'by'`. @@ -221,7 +221,7 @@ def energy_table(self, key, by="ZAM", kind="independent"): ).fillna(0.) def _expand_zap(self): - """ + r""" Produce dataframe with three extra columns containing the `Z`, `A` and `M` numbers of the **parent** (fissioning) nuclide. @@ -245,7 +245,7 @@ def _expand_zap(self): return self.data.assign(Z=zam.Z, A=zam.A, M=zam.M) def _expand_zam(self): - """ + r""" Produce dataframe with three extra columns containing the `Z`, `A` and `M` numbers of the **daughter** nuclide (fission product). @@ -268,7 +268,7 @@ def _expand_zam(self): return self.data.assign(Z=zam.Z, A=zam.A, M=zam.M) def get_mass_yield(self, zam, e): - """ + r""" Obtain mass yields from the following model: ChY = S * IFY Parameters @@ -299,7 +299,7 @@ def get_mass_yield(self, zam, e): return mass_yield.rename('mass yield') def get_chain_yield(self, zam, e, decay_data, **kwargs): - """ + r""" Obtain chain yields from the following model: ChY = S * IFY Parameters @@ -311,22 +311,23 @@ def get_chain_yield(self, zam, e, decay_data, **kwargs): decay_data : `sandy.DecayData` Radioactive nuclide data from where to obtain chain sensitivities. kwargs : `dict` - keyword arguments for method `get_decay_chains` + Keyword arguments for method :obj:`~sandy.decay.DecayData.get_decay_chains`. Returns ------- - `pandas.Series` + `pd.Series` Chain yield obtained from ChY = S * IFY Examples -------- + >>> zam = [591480, 591481, 601480, 561480, 571480, 571490, 581480] >>> decay_minimal = sandy.get_endf6_file("jeff_33", 'decay', zam) >>> decay_fytest = sandy.DecayData.from_endf6(decay_minimal) >>> tape_nfpy = sandy.get_endf6_file("jeff_33", 'nfpy', 922350) >>> nfpy = Fy.from_endf6(tape_nfpy) - >>> nfpy.get_chain_yield(922350, 0.0253, decay_fytest).loc[148] - 0.01692277272 + >>> result_value = float(nfpy.get_chain_yield(922350, 0.0253, decay_fytest).loc[148]) # Convert to native Python float + >>> assert result_value == 0.01692277272 """ # Filter FY data: conditions = {'ZAM': zam, "E": e, 'MT': 454} @@ -337,7 +338,7 @@ def get_chain_yield(self, zam, e, decay_data, **kwargs): return chain_yield.rename('chain yield') def get_mass_yield_sensitivity(self): - """ + r""" Obtain the mass yield sensitivity matrix based only on the information given in the `Fy` object (no decay data). 
@@ -376,7 +377,7 @@
         return groups.reset_index().pivot_table(index='A', columns='ZAP', values="COUNT", aggfunc="sum").fillna(0)
 
     def custom_perturbation(self, zam, mt, e, pert):
-        """
+        r"""
         Apply a custom perturbation to a given fission yield.
 
         Parameters
@@ -426,7 +427,7 @@
         return self.__class__(df)
 
     def apply_bmatrix(self, zam, energy, decay_data, keep_fy_index=False):
-        """
+        r"""
         Perform IFY = (1-B) * CFY equation to calculate IFY in a given zam
         for a given energy and apply into the original data.
 
@@ -524,7 +525,7 @@
         return self.__class__(data)
 
     def apply_qmatrix(self, zam, energy, decay_data, cut_hl=True, keep_fy_index=False):
-        """
+        r"""
         Perform CFY = Q * IFY equation to calculate CFY in a given zam
         for a given energy and apply into the original data.
 
@@ -620,7 +621,7 @@
         return self.__class__(data)
 
     def gls_update(self, zam, energy, S, y_extra, Vy_extra=None):
-        """
+        r"""
         Perform the GLS update of fission yields and their related
         covariance matrix, according with
         https://doi.org/10.1016/j.anucene.2015.10.027.
 
@@ -726,7 +727,7 @@
 
     def ishikawa_factor(self, zam, e, Vy_extra,
                         kind='mass yield', decay_data=None):
-        """
+        r"""
         Ishikawa factor to determine whether the experiment from where we
         obtain model sensitivity is useful to reduce the
         IFY uncertainty
 
@@ -808,7 +809,7 @@
         return ishikawa
 
     def _filters(self, conditions):
-        """
+        r"""
         Apply several conditions to source data and return filtered results.
 
         Parameters
@@ -836,7 +837,7 @@
         return out
 
     def filter_by(self, key, value):
-        """
+        r"""
         Apply condition to source data and return filtered results.
 
         Parameters
@@ -875,7 +876,7 @@
 
     @classmethod
     def from_endf6(cls, endf6, verbose=False):
-        """
+        r"""
         Extract fission yields from `Endf6` instance.
 
         Parameters
@@ -928,7 +929,7 @@
         return cls(df)
 
     def to_endf6(self, endf6):
-        """
+        r"""
         Update fission yields in `Endf6` instance with those available
         in a `Fy` instance.
diff --git a/sandy/sections/mf32.py b/sandy/sections/mf32.py index 79eb9d1b..8dd80b38 100644 --- a/sandy/sections/mf32.py +++ b/sandy/sections/mf32.py @@ -170,7 +170,7 @@ def read_mf32(tape, mat): LCOMP = 2 LRF = 3 >>> tape = sandy.get_endf6_file("jeff_33", "xs", 902320) >>> dic = sandy.read_mf32(tape, 9040) - >>> dic['NIS'][90232]["NER"][(1e-05, 4000.0)]["INTG"][350] + >>> dic['NIS'][90232]["NER"][(1e-05, 4000.0)]["INTG"][350] {'II': 795, 'JJ': 793, 'KIJ': array([44, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} @@ -587,8 +587,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) @@ -626,8 +626,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) @@ -681,8 +681,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) @@ -731,8 +731,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) @@ -804,8 +804,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) @@ -867,8 +867,8 @@ def read_mf32(tape, mat): for j in range(NM): I, i = read_intg(tape, mat, NDIGIT, i) add_intg = { - "II": I.II, - "JJ": I.JJ, + "II": int(I.II), + "JJ": int(I.JJ), "KIJ": I.KIJ, } INTG.update({j: add_intg}) diff --git a/setup.cfg b/setup.cfg index f5465299..101dfa80 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,21 +17,34 @@ include_package_data = True packages = find: python_requires = >=3.7 install_requires = - numpy>=1.9 + numpy scipy pandas pyyaml openpyxl [options.extras_require] -test = - pytest +test = numpydoc + nbval + codecov + coveralls + pytest-cov notebook = - jupyter>=1.0.0 + jupyterlab matplotlib seaborn -doc = sphinx + scikit-learn + jupyter_nbextensions_configurator + jupyter_contrib_nbextensions + serpentTools +doc = + sphinx + sphinx_rtd_theme +pack = + build + twine + [options.packages.find] exclude = diff --git a/setup.py b/setup.py deleted file mode 100755 index 57c026bf..00000000 --- a/setup.py +++ /dev/null @@ -1,4 +0,0 @@ -from setuptools import setup - -if __name__ == "__main__": - setup() \ No newline at end of file
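
---

Usage note: with requirements.txt, requirements_conda.txt, and setup.py removed,
all dependency metadata now lives in setup.cfg, and the workflows above install
sandy directly through pip extras. A minimal sketch of the resulting commands
(the extras names come from the [options.extras_require] section above; the
packaging step for the new "pack" extra is an assumption based on its contents,
build and twine):

    # runtime dependencies only
    pip install .

    # what unit_test.yml runs: test suite plus documentation tools
    pip install --no-cache-dir --user ".[test,doc]"

    # what test_notebooks.yml runs: the notebook stack as well
    pip install --no-cache-dir --user ".[test,notebook,doc]"

    # assumed release workflow enabled by the "pack" extra
    pip install ".[pack]"
    python -m build    # builds sdist and wheel into dist/

The quotes around ".[test,doc]" are optional in bash but protect the square
brackets from shells such as zsh that would otherwise expand them as glob
patterns.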