Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

EELS Widget #171

Merged
merged 10 commits into from
Oct 30, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .github/workflows/actions.yml
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@ jobs:
env:
PYPI_TOKEN_PASSWORD: ${{ secrets.API_TOKEN_PYPI2 }}
run: |

pip install twine
pip install build
python -m build --sdist --wheel
Expand Down
1,828 changes: 1,723 additions & 105 deletions notebooks/Spectroscopy/Analyse_Low_Loss.ipynb

Large diffs are not rendered by default.

316 changes: 290 additions & 26 deletions notebooks/Spectroscopy/EDS.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyTEMlib/animation.py
Original file line number Diff line number Diff line change
Expand Up @@ -386,7 +386,7 @@ def deficient_kikuchi_line(s_g=0., color_b='black'):
plt.gca().add_patch(deviation_angle)

plt.gca().set_aspect('equal')
plt.gca().set_xlabel('angle (1/$\AA$)')
plt.gca().set_xlabel(r'angle (1/$\AA$)')
plt.gca().set_ylim(-.1, k_0[1] * 2.2)
plt.gca().set_xlim(-.2, 1.03)

Expand Down
608 changes: 336 additions & 272 deletions pyTEMlib/core_loss_widget.py

Large diffs are not rendered by default.

25 changes: 15 additions & 10 deletions pyTEMlib/eels_dialog.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def get_core_loss_sidebar():


class CompositionWidget(object):
def __init__(self, datasets=None, key=None):
def __init__(self, datasets=None):

if not isinstance(datasets, dict):
raise TypeError('dataset or first item has to be a sidpy dataset')
Expand All @@ -204,7 +204,7 @@ def __init__(self, datasets=None, key=None):
self.model = []
self.sidebar = get_core_loss_sidebar()

self.set_dataset(key)
self.set_dataset()

self.periodic_table = eels_dialog_utilities.PeriodicTableWidget(self.energy_scale)
self.elements_cancel_button = ipywidgets.Button(description='Cancel')
Expand Down Expand Up @@ -305,7 +305,7 @@ def show_edges(self):



def set_dataset(self, set_key):
def set_dataset(self, set_key=None):
spectrum_list = []
self.spectrum_keys_list = []
reference_list =[('None', -1)]
Expand All @@ -316,13 +316,18 @@ def set_dataset(self, set_key):
if 'SPECTR' in self.datasets[key].data_type.name:
spectrum_list.append((f'{key}: {self.datasets[key].title}', index))
self.spectrum_keys_list.append(key)
if key == self.parent.coreloss_key:
self.key = key
self.coreloss_key = self.key
dataset_index = len(spectrum_list)-1

reference_list.append((f'{key}: {self.datasets[key].title}', index))

if set_key in self.spectrum_keys_list:
self.key = set_key
else:
self.key = self.spectrum_keys_list[-1]
self.dataset = self.datasets[self.key]
self.sidebar[0, 0].options = spectrum_list
self.sidebar[0, 0].value = dataset_index

if self.coreloss_key is None:
return
self.dataset = self.datasets[self.coreloss_key]

self.spec_dim = self.dataset.get_spectral_dims(return_axis=True)[0]

Expand Down Expand Up @@ -740,7 +745,7 @@ def set_action(self):

self.sidebar[11, 0].on_click(self.do_fit)
self.sidebar[12, 2].observe(self.plot, names='value')
self.sidebar[0, 0].observe(self.plot, names='value')
self.sidebar[0, 0].observe(self.set_dataset, names='value')
self.sidebar[12,0].observe(self.set_y_scale, names='value')
self.sidebar[13,0].observe(self.do_all_button_click, names='value')

Expand Down
142 changes: 104 additions & 38 deletions pyTEMlib/eels_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
from scipy.ndimage import gaussian_filter
from scipy.optimize import curve_fit, leastsq

from numba import jit, float64

import requests

# ## And we use the image tool library of pyTEMlib
Expand Down Expand Up @@ -247,7 +249,7 @@ def model_smooth(x, p, only_positive_intensity=False):

return y


@jit
def gauss(x, p): # p[0]==mean, p[1]= amplitude p[2]==fwhm,
"""Gaussian Function

Expand Down Expand Up @@ -319,9 +321,9 @@ def get_zero_loss_energy(dataset):
start = startx - i
if spectrum[startx + i] < 0.3 * spectrum[startx]:
end = startx + i
if end - start < 3:
end = startx + 2
start = startx - 2
if end - start < 7:
end = startx + 4
start = startx - 4
width = int((end-start)/2+0.5)

energy = dataset.get_spectral_dims(return_axis=True)[0].values
Expand Down Expand Up @@ -558,7 +560,7 @@ def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float
"""
# define Drude function for plasmon fitting
def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
E = E/E.max()

eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
elf = (-1/eps).imag
return A*elf
Expand All @@ -579,10 +581,15 @@ def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.nd
fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
guess_pos = np.argmax(fit_dset)
guess_amplitude = fit_dset[guess_pos]
guess_width = (endFitEnergy - startFitEnergy)/2
popt, pcov = curve_fit(energy_loss_function, energy, dataset,
guess_width = 8
popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
p0=[guess_pos, guess_width, guess_amplitude])
return popt

plasmon = dataset.like_data(energy_loss_function(energy, popt[0], popt[1], popt[2]))
start_plasmon = np.searchsorted(energy, 0)+1
plasmon[:start_plasmon] = 0.
plasmon.metadata['plasmon'] = popt
return plasmon

# if it can be parallelized:
fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
Expand Down Expand Up @@ -875,7 +882,7 @@ def get_x_sections(z: int=0) -> dict:
return 0


def list_all_edges(z: Union[str, int]=0, verbose=False)->[str, dict]:
def list_all_edges(z: Union[str, int]=0, verbose=False)->list[str, dict]:
"""List all ionization edges of an element with atomic number z

Parameters
Expand Down Expand Up @@ -1042,9 +1049,10 @@ def second_derivative(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:

noise_level_start = sensitivity * np.std(second_dif[3:50])
noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
return second_dif, noise_level
#slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
#noise_level = noise_level_start #+ np.arange(len(energy_scale)) * slope
return second_dif , noise_level



def find_edges(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:
Expand Down Expand Up @@ -1615,19 +1623,43 @@ def get_spectrum(dataset, x=0, y=0, bin_x=1, bin_y=1):
spectrum.data_type = 'Spectrum'
return spectrum

def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
def find_peaks(dataset, energy_scale):
    """Find peaks in a spectrum and build Gaussian start parameters.

    Uses scipy's peak finder (minimum width of 5 channels) and the
    half-maximum peak widths to produce start parameters for a Gaussian
    mixture fit.

    Parameters
    ----------
    dataset : array_like
        spectrum intensities (1D)
    energy_scale : array_like
        energy values for each channel; assumed equidistant — the
        dispersion is taken from the first two values

    Returns
    -------
    p_in : np.ndarray
        flattened [position, height, width] triple per detected peak
        (empty array if no peak qualifies)
    """
    peak_indices, _ = scipy.signal.find_peaks(dataset, width=5)
    # widths at half maximum, in channels; converted to energy units below
    widths_channels = scipy.signal.peak_widths(dataset, peak_indices, rel_height=0.5)[0]

    dispersion = energy_scale[1] - energy_scale[0]
    if len(peak_indices) == 0:
        # bug fix: previously p_in was left unbound here -> UnboundLocalError
        return np.array([])
    p_in = np.ravel([[energy_scale[index], dataset[index], widths_channels[i] * dispersion]
                     for i, index in enumerate(peak_indices)])
    return p_in

def nothing():
    """Placeholder that intentionally does nothing and returns None."""
    return None
"""
if dataset.data_type.name == 'SPECTRAL_IMAGE':
spectrum = dataset.view.get_spectrum()
if hasattr(dataset.view, 'get_spectrum'):
spectrum = dataset.view.get_spectrum()
else:
spectrum = np.array(dataset[0,0])

else:
spectrum = np.array(dataset)

energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values

second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
[indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)

"""


"""
start_channel = np.searchsorted(energy_scale, fit_start)
end_channel = np.searchsorted(energy_scale, fit_end)
peaks = []
Expand All @@ -1636,34 +1668,26 @@ def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
peaks.append(index - start_channel)

if 'model' in dataset.metadata:
model = dataset.metadata['model'][start_channel:end_channel]
model = dataset.metadata['model']

elif energy_scale[0] > 0:
if 'edges' not in dataset.metadata:
return
if 'model' not in dataset.metadata['edges']:
return
model = dataset.metadata['edges']['model']['spectrum'][start_channel:end_channel]
model = dataset.metadata['edges']['model']['spectrum']

else:
model = np.zeros(end_channel - start_channel)
model = np.zeros(len(energy_scale))

energy_scale = energy_scale[start_channel:end_channel]

difference = np.array(spectrum)[start_channel:end_channel] - model
difference = np.array(spectrum - model)[start_channel:end_channel]
fit = np.zeros(len(energy_scale))
p_out = []
if len(peaks) > 0:
p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
[p_out, _] = scipy.optimize.leastsq(residuals_smooth, p_in, ftol=1e-3, args=(energy_scale,
difference,
False))
fit = fit + model_smooth(energy_scale, p_out, False)

peak_model = np.zeros(len(spectrum))
peak_model[start_channel:end_channel] = fit

return peak_model, p_out
"""



def find_maxima(y, number_of_peaks):
Expand Down Expand Up @@ -1691,7 +1715,17 @@ def find_maxima(y, number_of_peaks):
peak_indices = np.argsort(peaks)
return peaks[peak_indices]


@jit
def gmm(x, p):
    """Evaluate a Gaussian mixture model.

    Parameters are packed in triples [position, amplitude, fwhm] per
    peak.  NOTE: amplitude and width entries of *p* are forced positive
    in place (visible to the caller) so the optimizer cannot produce
    negative peaks.
    """
    total = np.zeros(len(x))
    for start in range(0, (len(p) // 3) * 3, 3):
        p[start + 1] = abs(p[start + 1])  # amplitude must be positive
        p[start + 2] = abs(p[start + 2])  # width must be positive
        total = total + gauss(x, p[start:])
    return total
#
def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
""" model for fitting low-loss spectrum"""
Expand Down Expand Up @@ -1764,18 +1798,50 @@ def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):

return pin, peak_shape

@jit
def residuals3(pp, xx, yy):
    """Poisson-weighted residuals between spectrum *yy* and the Gaussian
    mixture model evaluated with parameters *pp* on energy axis *xx*.

    The factor 20 rescales the residuals for the least-squares optimizer.
    """
    difference = yy - gmm(xx, pp)
    return difference / np.sqrt(yy) * 20

def gaussian_mixture_model(dataset, p_in=None):
    """Fit a Gaussian mixture model to a spectrum.

    Parameters
    ----------
    dataset : sidpy.Dataset or array_like
        spectrum, or spectral image (then the currently displayed
        spectrum — or pixel [0, 0] — is fitted)
    p_in : array_like, optional
        flattened [position, amplitude, width] start parameters;
        estimated with find_peaks if None

    Returns
    -------
    peak_model : np.ndarray
        fitted mixture model evaluated on the energy scale
    p : np.ndarray
        optimized flattened Gaussian parameters
    """
    if isinstance(dataset, sidpy.Dataset):
        if dataset.data_type.name == 'SPECTRAL_IMAGE':
            if hasattr(dataset.view, 'get_spectrum'):
                spectrum = dataset.view.get_spectrum()
            else:
                spectrum = dataset[0, 0]
        else:
            spectrum = dataset
            # bug fix: was `spectrum.data_type == 'SPECTRUM'`, a no-op
            # comparison; assignment was clearly intended here
            spectrum.data_type = 'SPECTRUM'
        energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
    else:
        spectrum = np.array(dataset)
        energy_scale = np.arange(len(spectrum))
    spectrum = np.array(spectrum)
    # shift so the minimum is 1; keeps the sqrt weighting in the
    # residual function well defined (no division by zero / negatives)
    spectrum -= np.min(spectrum) - 1
    if p_in is None:
        p_in = find_peaks(spectrum, energy_scale)

    p = fit_gmm(energy_scale, np.array(spectrum), p_in)
    peak_model = gmm(energy_scale, p)
    return peak_model, p

def fit_gmm(x, y, pin):
    """Fit a Gaussian mixture model to a spectrum.

    Parameters
    ----------
    x : np.ndarray
        energy scale
    y : np.ndarray
        spectrum intensities
    pin : np.ndarray
        flattened start parameters ([position, amplitude, width] per peak)

    Returns
    -------
    np.ndarray
        optimized parameters from scipy's leastsq
    """
    fitted_parameters, _ = leastsq(residuals3, pin, args=(x, y), maxfev=10000)
    return fitted_parameters


def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
"""model for fitting low-loss spectrum"""

pin_original = pin.copy()

def residuals3(pp, xx, yy):
err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
restrict_width)) / np.sqrt(np.abs(yy))
return err


[p, _] = leastsq(residuals3, pin, args=(x, y))
[p, _] = scipy.optimize.leastsq(residuals3, pin, args=(x, y),maxfev = 19400)
# p2 = p.tolist()
# p3 = np.reshape(p2, (number_of_peaks, 3))
# sort_pin = np.argsort(p3[:, 0])
Expand Down Expand Up @@ -2028,4 +2094,4 @@ def get_spectrum_eels_db(formula=None, edge=None, title=None, element=None):
print(parameters['TITLE'])
print(f'found {len(reference_spectra.keys())} spectra in EELS database)')

return reference_spectra
return reference_spectra
Loading
Loading