From 46d9f4c3b7d2ad65080d167841d2232213412ff8 Mon Sep 17 00:00:00 2001
From: Luca Fiorito
Date: Fri, 15 Mar 2024 10:37:42 +0100
Subject: [PATCH] update

---
 sandy/aleph2/output_file.py | 4 +++-
 sandy/core/samples.py       | 3 ++-
 sandy/core/xs.py            | 5 +++--
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/sandy/aleph2/output_file.py b/sandy/aleph2/output_file.py
index 66b85096..97fb82a2 100644
--- a/sandy/aleph2/output_file.py
+++ b/sandy/aleph2/output_file.py
@@ -549,13 +549,15 @@ def parse_table_nuclide(text, index="ZAM", data_row=3, **kwargs):
     if len(lines[begin:]) == 0:
         df = pd.DataFrame(columns=columns)
     else:
+        # NEW IN ALEPH_2.9.2: energy is reported in the decay heat table
+        start = 2 if "E (MeV)" in lines[begin-1] else 1
         string = io.StringIO("\n".join(lines[begin:]))
         df = pd.read_csv(
             string,
             sep="\s+",
             header=None,
             index_col=index_col,
-        ).iloc[:, 1:]
+        ).iloc[:, start:]
         df.index.name = index
         df.columns = columns
     return df.T
diff --git a/sandy/core/samples.py b/sandy/core/samples.py
index 8f8864dc..a932425b 100644
--- a/sandy/core/samples.py
+++ b/sandy/core/samples.py
@@ -203,7 +203,8 @@ def iterate_xs_samples(self):
         df = self.data.unstack(level=levels)
 
         # -- Iterate over samples
-        for n, p in df.groupby(axis=1, level=self._columnsname):
+        for n, p_ in df.T.groupby(level=self._columnsname):
+            p = p_.T
             s = p.droplevel(self._columnsname, axis=1)
             adds = []
             for mat in s.columns.get_level_values("MAT").unique():
diff --git a/sandy/core/xs.py b/sandy/core/xs.py
index b06a3233..c0c6312d 100644
--- a/sandy/core/xs.py
+++ b/sandy/core/xs.py
@@ -415,7 +415,8 @@ def reconstruct_sums(self, drop=True):
         >>> assert xsr.data[(9543, 4)].equals(xsr.data[9543].loc[:, 50:91].sum(axis=1))
         """
         df = self.data.copy()
-        for mat, group in df.groupby("MAT", axis=1):
+#        for mat, group in df.groupby("MAT", axis=1):
+        for mat, group in df.T.groupby(level="MAT"):
             # starting from the lat redundant cross section, find daughters and sum them
             for parent, daughters in sorted(sandy.redundant_xs.items(),
                                             reverse=True):
@@ -426,7 +427,7 @@ def reconstruct_sums(self, drop=True):
 
             # keep only mts present in the original file
             if drop:
-                keep = group[mat].columns
+                keep = group[mat].index
                 todrop = df[mat].columns.difference(keep)
                 df.drop(
                     pd.MultiIndex.from_product([[mat], todrop]),
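
The first hunk accounts for a format change in ALEPH 2.9.2: the decay heat table now carries an extra "E (MeV)" column, so parse_table_nuclide skips two leading columns instead of one whenever the header line just above the data mentions that column. The snippet below is a minimal sketch of that conditional offset; the table layout (ZAM, nuclide name, energy, then one value per time step) is assumed for illustration only and is not taken from a real ALEPH output file.

    import io
    import pandas as pd

    # Hypothetical excerpt of a decay heat table; the layout is assumed
    # for illustration only (ZAM, name, E (MeV), then per-step values).
    text = """\
       ZAM  Nuclide     E (MeV)         0.0         1.0
    922350     U235  2.0297E+02  1.0000E+00  2.0000E+00
    942390    Pu239  5.2933E+03  3.0000E+00  4.0000E+00
    """
    lines = text.splitlines()
    begin = 1                # first data row; the header sits at begin - 1
    columns = [0.0, 1.0]     # time steps kept as column labels
    index_col = 0            # ZAM becomes the index

    # Skip the extra energy column only when the header announces it.
    start = 2 if "E (MeV)" in lines[begin - 1] else 1
    df = pd.read_csv(
        io.StringIO("\n".join(lines[begin:])),
        sep=r"\s+",
        header=None,
        index_col=index_col,
    ).iloc[:, start:]
    df.index.name = "ZAM"
    df.columns = columns
    print(df)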
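
The samples.py and xs.py hunks address the same pandas change: grouping a DataFrame by columns with groupby(..., axis=1) is deprecated since pandas 2.1 and scheduled for removal, so the code now transposes, groups on the relevant index level, and transposes back where column orientation is still needed. That transposition is also why keep switches from group[mat].columns to group[mat].index in the last hunk: the MT labels now live in the row index of the grouped frame. The snippet below is a small self-contained sketch of the pattern with made-up numbers; it is not sandy code.

    import pandas as pd

    # Toy frame shaped like sandy's cross-section data: columns are a
    # (MAT, MT) MultiIndex, rows are an energy grid.
    columns = pd.MultiIndex.from_tuples(
        [(125, 1), (125, 2), (9543, 1)], names=["MAT", "MT"]
    )
    df = pd.DataFrame(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
        index=[1e-5, 2e7],
        columns=columns,
    )

    # Deprecated column-wise grouping:
    # for mat, group in df.groupby("MAT", axis=1):
    #     ...

    # Replacement used in the patch: group the transposed frame by the
    # "MAT" index level, then transpose back where needed.
    for mat, group_t in df.T.groupby(level="MAT"):
        group = group_t.T              # same orientation as the old group (cf. p = p_.T)
        mts = group_t.loc[mat].index   # MT values, formerly group[mat].columns
        print(mat, list(mts), group.shape)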