Skip to content

Commit

Permalink
V0.0.8 (#25)
Browse files Browse the repository at this point in the history
* dead time correction and normalization

* Serpent output mass normalization

* Serpent mass normalization SI correction

* Version up

* Version up

* test Serpent mass correction in SI correction

* lines del

* less significant digits
  • Loading branch information
GrimFe authored Oct 25, 2024
1 parent e1108e6 commit b7b18f7
Show file tree
Hide file tree
Showing 6 changed files with 77 additions and 27 deletions.
4 changes: 3 additions & 1 deletion nerea/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,6 @@

from .comparisons import *

__version__ = '0.0.7.2'
from .constants import *

__version__ = '0.0.8'
16 changes: 10 additions & 6 deletions nerea/calculated.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import pandas as pd

from .utils import _make_df, ratio_v_u
from .constants import ATOMIC_MASS

__all__ = ['_Calculated',
'CalculatedSpectralIndex',
Expand All @@ -22,7 +23,7 @@ def calculate(self) -> None:
class CalculatedSpectralIndex(_Calculated):
data: pd.DataFrame
model_id: str
deposit_ids: list[str]
deposit_ids: list[str] # 0: num, 1: den

@classmethod
def from_sts(cls, file: str, detector_name: str, **kwargs):
Expand All @@ -48,12 +49,12 @@ def from_sts(cls, file: str, detector_name: str, **kwargs):
>>> c_instance = CalculatedSpectralIndex.from_sts('file_det0.m',
'SI_detector', model_id='Model1')
"""
## works with relative uncertainties for the moment.
## Shall be homogenized with the rest of the API
v, u = sts.read(file).detectors[detector_name].bins[0][-2:]
mass_norm = ATOMIC_MASS[kwargs['deposit_ids'][1]]['value'] / ATOMIC_MASS[kwargs['deposit_ids'][0]]['value']
# Serpent detector uncertainty is relative
kwargs['data'] = _make_df(v, u * v).assign(VAR_FRAC_C_n=None,
VAR_FRAC_C_d=None)
kwargs['data'] = _make_df(v * mass_norm, u * v * mass_norm
).assign(VAR_FRAC_C_n=None,
VAR_FRAC_C_d=None)
return cls(**kwargs)

@classmethod
Expand Down Expand Up @@ -85,7 +86,10 @@ def from_sts_detectors(cls, file: str, detector_names: dict[str, str], **kwargs)
v2, u2_ = sts.read(file).detectors[detector_names['denominator']].bins[0][-2:]
# Serpent detector uncertainty is relative
u1, u2 = u1_ * v1, u2_ * v2
v, u = ratio_v_u(_make_df(v=v1, u=u1), _make_df(v=v2, u=u2))
v, u = ratio_v_u(_make_df(v=v1 / ATOMIC_MASS[kwargs['deposit_ids'][0]]['value'],
u=u1 / ATOMIC_MASS[kwargs['deposit_ids'][0]]['value']),
_make_df(v=v2 / ATOMIC_MASS[kwargs['deposit_ids'][1]]['value'],
u=u2 / ATOMIC_MASS[kwargs['deposit_ids'][1]]['value']))
S1, S2= 1 / v2, v1 / v2 **2
kwargs['data'] = _make_df(v, u).assign(VAR_FRAC_C_n=(S1 * u1) **2,
VAR_FRAC_C_d=(S2 * u2) **2)
Expand Down
25 changes: 25 additions & 0 deletions nerea/constants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import pandas as pd

# Explicit public API: the package does `from .constants import *`, so without
# __all__ the pandas alias would leak into the `nerea` namespace.
__all__ = ['AVOGADRO', 'ATOMIC_MASS']

# Avogadro constant [1/mol], exact by the 2019 SI redefinition.
AVOGADRO = 6.02214076e23

# The isotopic mass data is from G. Audi, A. H. Wapstra Nucl. Phys A. 1993, 565, 1-65 and G. Audi, A. H. Wapstra Nucl. Phys A. 1995,
# 595, 409-480. The percent natural abundance data is from the 1997 report of the IUPAC Subcommittee for Isotopic
# Abundance Measurements by K.J.R. Rosman, P.D.P. Taylor Pure Appl. Chem. 1999, 71, 1593-1607.
# WHEN AVAILABLE, nucleon number otherwise.
#
# Columns are nuclide identifiers; rows are 'value' (atomic mass [amu] or, as a
# fallback, the nucleon number as a float) and 'uncertainty' (currently 0.0 for
# every nuclide — no mass uncertainty is propagated yet).
ATOMIC_MASS = pd.DataFrame(
    {"U233": [233.0, 0.0],
     "U234": [234.040916, 0.0],
     "U235": [235.043923, 0.0],
     "U236": [236.0, 0.0],
     "U238": [238.050783, 0.0],
     "Np237": [237.048167, 0.0],
     "Pu238": [238.0, 0.0],
     "Pu239": [239.0521634, 0.0],
     "Pu240": [240.053807, 0.0],
     "Pu241": [241.0, 0.0],
     "Pu242": [242.0, 0.0],
     "Am241": [241.0, 0.0]
     }, index=['value', 'uncertainty'])
24 changes: 17 additions & 7 deletions nerea/experimental.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from .effective_mass import EffectiveMass
from .reaction_rate import ReactionRate, ReactionRates
from .utils import ratio_v_u, product_v_u, _make_df
from .constants import ATOMIC_MASS

import pandas as pd
import numpy as np
Expand Down Expand Up @@ -166,10 +167,10 @@ def _time_normalization(self) -> pd.DataFrame:
pd.DataFrame
with normalization value and uncertainty
"""
r, l = self.fission_fragment_spectrum.real_time, self.fission_fragment_spectrum.life_time
v = r / l**2
u = np.sqrt((1 / l **2 * self.fission_fragment_spectrum.real_time_uncertainty) **2 +
(2 * r / l **3 * self.fission_fragment_spectrum.life_time_uncertainty)**2)
l = self.fission_fragment_spectrum.life_time
v = 1 / l
u = np.sqrt((1 / self.fission_fragment_spectrum.life_time **2 \
* self.fission_fragment_spectrum.life_time_uncertainty)**2)
return _make_df(v, u)

@property
Expand Down Expand Up @@ -429,17 +430,26 @@ def _compute_correction(self, one_g_xs: pd.DataFrame) -> pd.DataFrame:
"""
imp = self.numerator.effective_mass.composition_.set_index('nuclide')
imp.columns = ['value', 'uncertainty']
# normalize impurities and one group xs

# normalize Serpent output per unit mass
v = one_g_xs['value'] / ATOMIC_MASS.T['value']
u = one_g_xs['uncertainty'] / ATOMIC_MASS.T['value']
xs = pd.concat([v.dropna(), u.dropna()], axis=1)
xs.columns = ["value", "uncertainty"]

# normalize impurities and one group xs to the numerator deposit
imp_v, imp_u = ratio_v_u(imp,
imp.loc[self.numerator.deposit_id])
xs_v, xs_u = ratio_v_u(one_g_xs,
one_g_xs.loc[self.denominator.deposit_id])
xs_v, xs_u = ratio_v_u(xs,
xs.loc[self.denominator.deposit_id])

# remove information on the main isotope
# will sum over all impurities != self.numerator.deposit_id
imp_v = imp_v.drop(self.numerator.deposit_id)
imp_u = imp_u.drop(self.numerator.deposit_id)
xs_v = xs_v.drop(self.numerator.deposit_id)
xs_u = xs_u.drop(self.numerator.deposit_id)

# compute correction and its uncertainty
if not all([i in xs_v.index] for i in imp_v.index):
warnings.warn("Not all impurities were provided with a cross section.")
Expand Down
2 changes: 1 addition & 1 deletion tests/test_NormalizedFissionFragmentSpectrum.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def test_time_normalization(nffs):
tmp = nffs
tmp.fission_fragment_spectrum.life_time_uncertainty = .1
tmp.fission_fragment_spectrum.real_time_uncertainty = .1
pd.testing.assert_frame_equal(tmp._time_normalization, _make_df(.1, np.sqrt(5) * 1e-3))
pd.testing.assert_frame_equal(tmp._time_normalization, _make_df(.1, 1/10 **2 * .1))

def test_power_normalization(nffs):
pd.testing.assert_frame_equal(nffs._power_normalization,
Expand Down
33 changes: 21 additions & 12 deletions tests/test_SpectralIndex.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,26 +38,26 @@ def sample_spectrum_1(sample_spectrum_data):
return FissionFragmentSpectrum(start_time=datetime(2024, 5, 18, 20, 30, 15),
life_time=10, real_time=10,
data=sample_spectrum_data, campaign_id="A", experiment_id="B",
detector_id="C1", deposit_id="D1", location_id="E", measurement_id="F1")
detector_id="C1", deposit_id="U238", location_id="E", measurement_id="F1")

@pytest.fixture
def sample_spectrum_2(sample_spectrum_data):
return FissionFragmentSpectrum(start_time=datetime(2024, 5, 18, 20, 30, 15),
life_time=10, real_time=10,
data=sample_spectrum_data, campaign_id="A", experiment_id="B",
detector_id="C2", deposit_id="D2", location_id="E", measurement_id="F2")
detector_id="C2", deposit_id="U235", location_id="E", measurement_id="F2")

@pytest.fixture
def effective_mass_1(sample_integral_data):
data = pd.DataFrame({'a1': [0.1, 0.01], 'a2': [0.2, 0.02], 'D1': [0.7, 0.07]}).T.reset_index()
data = pd.DataFrame({'U236': [0.1, 0.01], 'U234': [0.2, 0.02], 'U238': [0.7, 0.07]}).T.reset_index()
data.columns = ['nuclide', 'share', 'uncertainty']
return EffectiveMass(deposit_id="D1", detector_id="C1", data=sample_integral_data, bins=42, composition=data)
return EffectiveMass(deposit_id="U238", detector_id="C1", data=sample_integral_data, bins=42, composition=data)

@pytest.fixture
def effective_mass_2(sample_integral_data):
data = pd.DataFrame({'b1': [0.15, 0.015], '12': [0.25, 0.025], 'D1': [0.6, 0.06]}).T.reset_index()
data = pd.DataFrame({'U234': [0.15, 0.015], 'U238': [0.25, 0.025], 'U235': [0.6, 0.06]}).T.reset_index()
data.columns = ['nuclide', 'share', 'uncertainty']
return EffectiveMass(deposit_id="D2", detector_id="C2", data=sample_integral_data, bins=42, composition=data)
return EffectiveMass(deposit_id="U235", detector_id="C2", data=sample_integral_data, bins=42, composition=data)

@pytest.fixture
def power_monitor(sample_power_monitor_data):
Expand All @@ -77,13 +77,13 @@ def si(rr_1, rr_2):

@pytest.fixture
def synthetic_one_g_xs_data():
data = pd.DataFrame({'a1': [0.07, 0.001], 'a2': [0.08, 0.002],
'D1': [0.9, 0.003], 'D2': [0.6, 0.004]}).T.reset_index()
data = pd.DataFrame({'U236': [0.07, 0.001], 'U234': [0.08, 0.002],
'U238': [0.9, 0.003], 'U235': [0.6, 0.004]}).T.reset_index()
data.columns = ['nuclide', 'value', 'uncertainty']
return data.set_index('nuclide')

def test_deposit_ids(si):
assert si.deposit_ids == ['D1', 'D2']
assert si.deposit_ids == ['U238', 'U235']

def test_get_long_output(si):
expected_df = pd.DataFrame({'FFS_n': 8.79100000e+02,
Expand Down Expand Up @@ -138,7 +138,7 @@ def test_process(si):

def test_compute_correction(si, synthetic_one_g_xs_data):
w1, uw1, w2, uw2, wd, uwd = .1, .01, .2, .02, .7, .07
x1, ux1, x2, ux2, xd, uxd = .07, .001, .08, .002, .6, .004
x1, ux1, x2, ux2, xd, uxd = .07 / 236., .001 / 236., .08 / 234., .002 / 234., .6 / 235.043923, .004 / 235.043923
v = (w1/wd * x1/xd) + (w2/wd * x2/xd)

W1, X1 = w1 / wd, x1 / xd
Expand All @@ -149,7 +149,12 @@ def test_compute_correction(si, synthetic_one_g_xs_data):

data = pd.DataFrame({'value': [v], 'uncertainty': [u], 'uncertainty [%]': u / v * 100}, index=['value'])

pd.testing.assert_frame_equal(data, si._compute_correction(synthetic_one_g_xs_data))
nerea_ = si._compute_correction(synthetic_one_g_xs_data)

np.testing.assert_equal(data.index.values, nerea_.index.values)
np.testing.assert_equal(data.columns.values, nerea_.columns.values)
np.testing.assert_almost_equal(data['value'].values, nerea_['value'].values, decimal=5)
np.testing.assert_almost_equal(data['uncertainty'].values, nerea_['uncertainty'].values, decimal=6)

def test_compute_with_correction(si, synthetic_one_g_xs_data):
w1, uw1, w2, uw2, wd, uwd = .1, .01, .2, .02, .7, .07
Expand All @@ -167,4 +172,8 @@ def test_compute_with_correction(si, synthetic_one_g_xs_data):

data = pd.DataFrame({'value': [v_], 'uncertainty': [u_], 'uncertainty [%]': u_ / v_ * 100}, index=['value'])

pd.testing.assert_frame_equal(data, si.process(synthetic_one_g_xs_data)[['value', 'uncertainty', 'uncertainty [%]']])
nerea_ = si.process(synthetic_one_g_xs_data)
np.testing.assert_equal(data.index.values, nerea_.index.values)
np.testing.assert_equal(data.columns.values, nerea_[['value', 'uncertainty', 'uncertainty [%]']].columns.values)
np.testing.assert_almost_equal(data['value'].values, nerea_['value'].values, decimal=4)
np.testing.assert_almost_equal(data['uncertainty'].values, nerea_['uncertainty'].values, decimal=5)

0 comments on commit b7b18f7

Please sign in to comment.