From b59ff2f99750cbcc45e2f47211efb48d168e3c65 Mon Sep 17 00:00:00 2001
From: Matthew Newville
Date: Tue, 11 Jul 2023 14:27:15 -0500
Subject: [PATCH] more cleanups for robustly copying array-likes

---
 larch/io/gse_escan.py       |  5 ++---
 larch/io/gse_mcafile.py     |  2 +-
 larch/math/convolution1D.py |  2 +-
 larch/math/utils.py         |  6 +++---
 larch/wxlib/xrfdisplay.py   | 17 +++++++++--------
 larch/xafs/autobk.py        |  4 ++--
 larch/xafs/feffdat.py       |  2 +-
 larch/xafs/feffit.py        |  2 +-
 larch/xafs/pre_edge.py      | 16 ++++++++--------
 larch/xafs/prepeaks.py      |  8 ++++----
 larch/xray/background.py    |  4 ++--
 11 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/larch/io/gse_escan.py b/larch/io/gse_escan.py
index 7702e5330..f5442ed4f 100644
--- a/larch/io/gse_escan.py
+++ b/larch/io/gse_escan.py
@@ -2,7 +2,6 @@
 
 import os
 import sys
-import copy
 import time
 import gc
 
@@ -369,8 +368,8 @@ def _make_arrays(self, tmp_dat, col_legend, col_details):
             while len(s) < nsums:
                 s.append(-1)
         # finally, icr/ocr corrected sums
-        self.det_corr = self.det.copy()
-        self.sums_corr = self.sums.copy()
+        self.det_corr = self.det[:]*1.0
+        self.sums_corr = self.sums[:]*1.0
 
         if self.correct_deadtime:
             idet = -1
diff --git a/larch/io/gse_mcafile.py b/larch/io/gse_mcafile.py
index 9faa08851..64b002af4 100644
--- a/larch/io/gse_mcafile.py
+++ b/larch/io/gse_mcafile.py
@@ -244,7 +244,7 @@ def readtext(self, text, bad=None):
         self.counts = self.get_counts()
         self.raw = self.get_counts(dt_correct=False)
         self.name = 'mcasum'
-        self.energy = mca0.energy.copy()
+        self.energy = mca0.energy[:]*1.0
         self.environ = mca0.environ
         self.real_time = mca0.real_time
         self.live_time = mca0.live_time
diff --git a/larch/math/convolution1D.py b/larch/math/convolution1D.py
index d0271f38f..1f8cdf715 100644
--- a/larch/math/convolution1D.py
+++ b/larch/math/convolution1D.py
@@ -170,7 +170,7 @@ def conv(x, y, gammas, e_cut=None, kernel="gaussian"):
     """
 
     assert e_cut is not None, "starting energy for the convolution not given"
-    f = y.copy()
+    f = y[:]*1.0
     z = np.zeros_like(f)
     # ief = index_nearest(x, e_cut)
     ief = np.argmin(np.abs(x - e_cut))
diff --git a/larch/math/utils.py b/larch/math/utils.py
index 4fb67db17..a0401e177 100644
--- a/larch/math/utils.py
+++ b/larch/math/utils.py
@@ -78,7 +78,7 @@ def complex_phase(arr):
     "return phase, modulo 2pi jumps"
     phase = np.arctan2(arr.imag, arr.real)
     d = np.diff(phase)/np.pi
-    out = phase.copy()
+    out = phase[:]*1.0
     out[1:] -= np.pi*(np.round(abs(d))*np.sign(d)).cumsum()
     return out
 
@@ -414,7 +414,7 @@ def boxcar(data, nrepeats=1):
     -----
     This does a 3-point smoothing, that can be repeated
 
-        out = data.copy()
+        out = data[:]*1.0
         for i in range(nrepeats):
             qdat = out/4.0
             left = 1.0*qdat
@@ -425,7 +425,7 @@
         return out
     """
 
-    out = data.copy()
+    out = data[:]*1.0
     for i in range(nrepeats):
         qdat = out/4.0
         left = 1.0*qdat
diff --git a/larch/wxlib/xrfdisplay.py b/larch/wxlib/xrfdisplay.py
index 2eabe7ee3..341a8570e 100644
--- a/larch/wxlib/xrfdisplay.py
+++ b/larch/wxlib/xrfdisplay.py
@@ -1172,9 +1172,10 @@ def plot(self, x, y=None, mca=None, init=False, with_rois=True, **kws):
                   'color': self.conf.spectra_color}
         kwargs.update(kws)
 
-        self.xdata = x.copy()
-        self.ydata = y.copy()
-        ydat = nycopy() + 1.e-9
+        self.xdata = 1.0*x[:]
+        self.ydata = 1.0*y[:]
+        self.ydata[np.where(self.ydata<1.e-9)] = 1.e-9
+        ydat = self.ydata
         kwargs['ymax'] = max(ydat)*1.25
         kwargs['ymin'] = 0.9
         kwargs['xmax'] = max(self.xdata)
@@ -1215,9 +1216,9 @@ def update_mca(self, counts, energy=None, with_rois=True,
         if is_mca2:
             mca = self.mca2
             ix = 2
-        mca.counts = counts.copy()
+        mca.counts = 1.0*counts[:]
         if energy is not None:
-            mca.energy = energy.copy()
+            mca.energy = 1.0*energy[:]
             xnpts = 1.0/len(energy)
         nrois = len(mca.rois)
         if not is_mca2 and with_rois and nrois > 0:
@@ -1241,7 +1242,7 @@ def update_mca(self, counts, energy=None, with_rois=True,
             self.panel.axes.set_ylim(0.9, 1.25*max(max_counts, max_counts2))
 
         if mca == self.mca:
-            self.ydata = counts.copy()
+            self.ydata = 1.0*counts[:]
         self.update_status()
         if draw: self.draw()
@@ -1250,8 +1251,8 @@ def oplot(self, x, y, color='darkgreen', label='spectrum2',
         if mca is not None:
             self.mca2 = mca
 
-        self.x2data = x.copy()
-        self.y2data = y.copy()
+        self.x2data = 1.0*x[:]
+        self.y2data = 1.0*y[:]
         if hasattr(self, 'ydata'):
             ymax = max(max(self.ydata), max(y))*1.25
         else:
diff --git a/larch/xafs/autobk.py b/larch/xafs/autobk.py
index f194c1a17..7695362e2 100644
--- a/larch/xafs/autobk.py
+++ b/larch/xafs/autobk.py
@@ -205,7 +205,7 @@ def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None, ek0=None,
     coefs = [result.params[FMT_COEF % i].value for i in range(len(coefs))]
     bkg, chi = spline_eval(kraw[:iemax-iek0+1], mu[iek0:iemax+1],
                            knots, coefs, order, kout)
-    obkg = mu.copy()
+    obkg = mu[:]*1.0
     obkg[iek0:iek0+len(bkg)] = bkg
 
     # outputs to group
@@ -220,7 +220,7 @@ def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None, ek0=None,
     details = Group(kmin=kmin, kmax=kmax, irbkg=irbkg, nknots=len(spl_k),
                     knots_k=knots, init_knots_y=spl_y, nspl=nspl,
                     init_chi=initchi/edge_step, report=fit_report(result))
-    details.init_bkg = mu.copy()
+    details.init_bkg = mu[:]*1.0
     details.init_bkg[iek0:iek0+len(bkg)] = initbkg
     details.knots_y = np.array([coefs[i] for i in range(nspl)])
     group.autobk_details = details
diff --git a/larch/xafs/feffdat.py b/larch/xafs/feffdat.py
index 8940b97c0..52ebfb6d7 100644
--- a/larch/xafs/feffdat.py
+++ b/larch/xafs/feffdat.py
@@ -633,7 +633,7 @@ def ff2chi(paths, group=None, paramgroup=None, k=None, kmax=None,
             return
         path.create_path_params(params=params)
        path._calc_chi(k=k, kstep=kstep, kmax=kmax)
-    k = pathlist[0].k.copy()
+    k = pathlist[0].k[:]*1.0
     out = np.zeros_like(k)
     for path in pathlist:
         out += path.chi
diff --git a/larch/xafs/feffit.py b/larch/xafs/feffit.py
index 206685317..f2cca181d 100644
--- a/larch/xafs/feffit.py
+++ b/larch/xafs/feffit.py
@@ -247,7 +247,7 @@ def __init__(self, data=None, paths=None, transform=None,
         self.data = Group(__name__='Feffit DatasSet from %s' % repr(data),
                           groupname=getattr(data, 'groupname', repr(data)),
                           filename=getattr(data, 'filename', repr(data)),
-                          k=data.k.copy(), chi=data.chi.copy())
+                          k=data.k[:]*1.0, chi=data.chi[:]*1.0)
         if hasattr(data, 'config'):
             self.data.config = deepcopy(data.config)
         else:
diff --git a/larch/xafs/pre_edge.py b/larch/xafs/pre_edge.py
index 18a2c3ab0..b74adab9d 100644
--- a/larch/xafs/pre_edge.py
+++ b/larch/xafs/pre_edge.py
@@ -425,16 +425,16 @@ def energy_align(group, reference, array='dmude', emin=-15, emax=35):
         en = getattr(reference, 'energy')
         reference.dmude = gradient(mu)/gradient(en)
 
-    xdat = group.energy.copy()
-    xref = reference.energy.copy()
-    ydat = group.dmude.copy()
-    yref = reference.dmude.copy()
+    xdat = group.energy[:]*1.0
+    xref = reference.energy[:]*1.0
+    ydat = group.dmude[:]*1.0
+    yref = reference.dmude[:]*1.0
     if array == 'mu':
-        ydat = group.mu.copy()
-        yref = reference.mu.copy()
+        ydat = group.mu[:]*1.0
+        yref = reference.mu[:]*1.0
     elif array == 'norm':
-        ydat = group.norm.copy()
-        yref = reference.norm.copy()
+        ydat = group.norm[:]*1.0
+        yref = reference.norm[:]*1.0
 
     xdat, ydat = remove_nans2(xdat, ydat)
     xref, yref = remove_nans2(xref, yref)
diff --git a/larch/xafs/prepeaks.py b/larch/xafs/prepeaks.py
index e0d4e6056..9b13186b8 100644
--- a/larch/xafs/prepeaks.py
+++ b/larch/xafs/prepeaks.py
@@ -66,17 +66,17 @@ def prepeaks_setup(energy, norm=None, arrayname=None, group=None, emin=None, ema
     ydat = None
     if norm is not None and arrayname in (None, 'aspassed'):
         arrayname = 'aspassed'
-        ydat = norm.copy()
+        ydat = norm[:]*1.0
 
     energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                            defaults=(norm,), group=group,
                                            fcn_name='pre_edge_baseline')
     if arrayname == 'flat' and hasattr(group, 'flat'):
-        ydat = group.flat.copy()
+        ydat = group.flat[:]*1.0
     elif arrayname == 'deconv' and hasattr(group, 'deconv'):
-        ydat = group.deconv.copy()
+        ydat = group.deconv[:]*1.0
     if ydat is None:
-        ydat = norm.copy()
+        ydat = norm[:]*1.0
 
     if len(energy.shape) > 1:
         energy = energy.squeeze()
diff --git a/larch/xray/background.py b/larch/xray/background.py
index 21de5dc38..72f360426 100644
--- a/larch/xray/background.py
+++ b/larch/xray/background.py
@@ -201,7 +201,7 @@ def calc(self, data=None, slope=1.0, type_int=False):
 
         nchans = len(data)
         self.bgr = np.zeros(nchans, dtype=np.int32)
-        scratch = data.copy()
+        scratch = 1.0*data[:]
 
         # Compress scratch spectrum
         if compress > 1:
@@ -214,7 +214,7 @@ def calc(self, data=None, slope=1.0, type_int=False):
         nchans = len(scratch)  #nchans / compress
 
         # Copy scratch spectrum to background spectrum
-        bckgnd = scratch.copy()
+        bckgnd = 1.0*scratch[:]
 
         # Find maximum counts in input spectrum. This information is used to
        # limit the size of the function lookup table
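
Note on the idiom used throughout this patch: the sketch below (not taken from the larch sources) illustrates what `x[:]*1.0` buys over `x.copy()` for copying array-likes. The h5py dataset at the end is a hypothetical example of the kind of lazily loaded array-like the subject line may refer to; the file and dataset names are illustrative only.

    # Sketch: `x[:]*1.0` vs `x.copy()` for copying array-likes.
    import numpy as np

    counts = np.arange(5, dtype=np.int32)

    a = counts.copy()        # new array, but keeps the int32 dtype
    b = counts[:] * 1.0      # new float64 ndarray, detached from `counts`

    b[0] = -1.0
    print(a.dtype, b.dtype)  # int32 float64
    print(counts[0])         # 0 -- the source array is untouched

    # For a lazily loaded array-like (e.g. an h5py dataset), slicing first
    # pulls the data into an ndarray, so the same one-liner yields a plain
    # float copy even when the object has no suitable .copy() method:
    #
    #   import h5py
    #   with h5py.File("scan.h5", "r") as fh:     # hypothetical file/dataset
    #       energy = fh["energy"][:] * 1.0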