From a36878837de63c7fd876523a8cdfda5caba97171 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 24 May 2023 19:51:44 -0400 Subject: [PATCH 01/63] generalized_opt_routine --- .../generalized_optimization.py | 134 ++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 hnn_core/opt_toy_example/generalized_optimization.py diff --git a/hnn_core/opt_toy_example/generalized_optimization.py b/hnn_core/opt_toy_example/generalized_optimization.py new file mode 100644 index 000000000..71e3a0ee2 --- /dev/null +++ b/hnn_core/opt_toy_example/generalized_optimization.py @@ -0,0 +1,134 @@ +# Author: + +import os.path as op +import numpy as np +import matplotlib.pyplot as plt + +import hnn_core +from hnn_core import read_params, jones_2009_model, simulate_dipole +from skopt import gp_minimize +from skopt.plots import plot_convergence, plot_objective + +from scipy.optimize import fmin_cobyla + +error_values_cobyla = list() + +def reduced_network(): + # load default params + hnn_core_root = op.dirname(hnn_core.__file__) + params_fname = op.join(hnn_core_root, 'param', 'default.json') + params = read_params(params_fname) + + # update params + params.update({'N_pyr_x': 3, 'N_pyr_y': 3}) + net_obj = jones_2009_model(params, add_drives_from_params=True) + net_obj.clear_drives() + return net_obj + +def sim_dipole(net_obj, mu): + tstop = 10. + n_trials = 1 + synaptic_delays = {'L2_basket': 0.1, + 'L2_pyramidal': 0.1, + 'L5_basket': 1., + 'L5_pyramidal': 1.} + weights_ampa = {'L2_basket': 0.5, + 'L2_pyramidal': 0.5, + 'L5_basket': 0.5, + 'L5_pyramidal': 0.5} + net_obj.add_evoked_drive('evprox1', + mu=mu, + sigma=2, + numspikes=1, + location='proximal', + weights_ampa=weights_ampa, + synaptic_delays=synaptic_delays) + dpl = simulate_dipole(net_obj, tstop=tstop, n_trials=n_trials)[0] + return dpl + +def calculate_rmse(mu_predicted): + dpl_estimate = sim_dipole(net.copy(), mu_predicted) + rmse = np.sqrt( ((dpl_estimate.data['agg'] - dpl_true.data['agg'])**2).sum() / len(dpl_estimate.times) ) + error_values_cobyla.append(rmse) + print('New mu: ', mu_predicted, ' RMSE: ', rmse) + return rmse + + +net = reduced_network() +mu_true = 2 +mu_offset = 4 +dpl_true = sim_dipole(net.copy(), mu_true) +dpl_offset = sim_dipole(net.copy(), mu_offset) +max_iter = 15 + +#----------------------------------------------Bayesian optimization------------------------------------ +bayesian_optimization_results = gp_minimize(calculate_rmse, + [(0.01, 10.)], + acq_func='EI', + n_calls=max_iter, + x0=[mu_offset], + random_state=64) + +mu_bayesian = bayesian_optimization_results.x[0] +dpl_bayesian = sim_dipole(net.copy(), mu_bayesian) + +print('Bayesian Optimization Results \n-----------------------------') +print('True mu: ', mu_true, '\nOffset mu: ', mu_offset, '\nOptimized mu: ', mu_bayesian) +# plot_convergence(bayesian_optimization_results) +error_values_bayesian = [np.min(bayesian_optimization_results.func_vals[:i]) for i in range(1, max_iter + 1)] + +#--------------------------------------------COBYLA optimization--------------------------------------- +cons = list() +x0 = list() +cons.append(lambda x, idx=0: 10. 
- x[0]) +cons.append(lambda x, ind=0: x[0] - 0.01) +x0.append(mu_offset) +error_values_cobyla.clear() +cobyla_optimization_results = fmin_cobyla(calculate_rmse, + cons=cons, + rhobeg=0.1, + rhoend=1e-4, + x0=x0, + maxfun=max_iter, + catol=0.0) +dpl_cobyla = sim_dipole(net.copy(), cobyla_optimization_results[0]) + +print('COBYLA Optimization Results \n---------------------------') +print('True mu: ', mu_true, '\nOffset mu: ', mu_offset, '\nOptimized mu: ', cobyla_optimization_results[0]) + +#--------------------------------------------Plot------------------------------------------------------- +x_axis = list(range(1, max_iter + 1)) +fig, axs = plt.subplots(2, 2, figsize=(10,4)) +ax1 = axs[0, 0] +ax2 = axs[0, 1] +ax3 = axs[1, 0] +ax4 = axs[1, 1] +y_max = max([max(error_values_bayesian), max(error_values_cobyla)]) + min([min(error_values_bayesian), min(error_values_cobyla)]) + +# bayesian +dpl_true.plot(ax=ax1, color='black') +dpl_offset.plot(ax=ax1, color='blue') +dpl_bayesian.plot(ax=ax1, color='orange') +ax1.set_title('Bayesian optimization') +ax1.legend(['true', 'offset', 'opt'], frameon=False) +ax3.plot(x_axis, error_values_bayesian, color='black') +ax3.set_title('Convergence') +ax3.set_xlabel('Number of calls') +ax3.set_ylabel('RMSE') +ax3.set_ylim([0, y_max]) +ax3.text(9, 0.007, 'RMSE: ' + str(round(min(error_values_bayesian), 4)), style ='italic', fontsize = 15, color = 'red') + +# COBYLA +dpl_true.plot(ax=ax2, color='black') +dpl_offset.plot(ax=ax2, color='blue') +dpl_cobyla.plot(ax=ax2, color='orange') +ax2.set_title('COBYLA optimization') +ax4.plot(x_axis, error_values_cobyla, color='black') +ax4.set_title('Convergence') +ax4.set_xlabel('Number of calls') +ax4.set_ylabel('RMSE') +ax4.set_ylim([0, y_max]) +ax4.text(9, 0.007, 'RMSE: ' + str(round(min(error_values_cobyla), 4)), style = 'italic', fontsize = 15, color = 'red') + +plt.tight_layout() + From a7a2f51b3b1534b2262f5241e24137a7a1caec62 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 1 Jun 2023 13:28:41 -0400 Subject: [PATCH 02/63] add syn weights, drives, and increase time & iter --- .../generalized_optimization.py | 193 ++++++++++++------ 1 file changed, 126 insertions(+), 67 deletions(-) diff --git a/hnn_core/opt_toy_example/generalized_optimization.py b/hnn_core/opt_toy_example/generalized_optimization.py index 71e3a0ee2..307a84ade 100644 --- a/hnn_core/opt_toy_example/generalized_optimization.py +++ b/hnn_core/opt_toy_example/generalized_optimization.py @@ -1,17 +1,18 @@ # Author: - import os.path as op import numpy as np import matplotlib.pyplot as plt +from copy import deepcopy import hnn_core from hnn_core import read_params, jones_2009_model, simulate_dipole -from skopt import gp_minimize -from skopt.plots import plot_convergence, plot_objective +# from hnn_core import JoblibBackend +from hnn_core import MPIBackend from scipy.optimize import fmin_cobyla - -error_values_cobyla = list() +from skopt import gp_minimize +# from skopt.plots import plot_convergence, plot_objective +# from hnn_core.optimization import optimize_evoked def reduced_network(): # load default params @@ -25,67 +26,116 @@ def reduced_network(): net_obj.clear_drives() return net_obj -def sim_dipole(net_obj, mu): - tstop = 10. +def sim_dipole(net_obj, params): + tstop = 100. 
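+    # `params` is read positionally: params[0] = proximal-drive mu,
+    # params[1] = distal-drive mu, params[2:6] = the four proximal AMPA
+    # weights, params[6:9] = the three distal AMPA weights.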
n_trials = 1 - synaptic_delays = {'L2_basket': 0.1, + + # prox input + synaptic_delays_prox = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, 'L5_basket': 1., 'L5_pyramidal': 1.} - weights_ampa = {'L2_basket': 0.5, - 'L2_pyramidal': 0.5, - 'L5_basket': 0.5, - 'L5_pyramidal': 0.5} + + weights_ampa_prox = {'L2_basket': params[2], + 'L2_pyramidal': params[3], + 'L5_basket': params[4], + 'L5_pyramidal': params[5]} + net_obj.add_evoked_drive('evprox1', - mu=mu, + mu=params[0], sigma=2, numspikes=1, location='proximal', - weights_ampa=weights_ampa, - synaptic_delays=synaptic_delays) - dpl = simulate_dipole(net_obj, tstop=tstop, n_trials=n_trials)[0] - return dpl + weights_ampa=weights_ampa_prox, + synaptic_delays=synaptic_delays_prox) + + # dist input + synaptic_delays_dist = {'L2_basket': 0.1, + 'L2_pyramidal': 0.1, + 'L5_pyramidal': 1.} -def calculate_rmse(mu_predicted): - dpl_estimate = sim_dipole(net.copy(), mu_predicted) - rmse = np.sqrt( ((dpl_estimate.data['agg'] - dpl_true.data['agg'])**2).sum() / len(dpl_estimate.times) ) + weights_ampa_dist = {'L2_basket': params[6], + 'L2_pyramidal': params[7], + 'L5_pyramidal': params[8]} + + net_obj.add_evoked_drive('evdist1', + mu=params[1], + sigma=2, + numspikes=1, + location='distal', + weights_ampa=weights_ampa_dist, + synaptic_delays=synaptic_delays_dist) + with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): + dpl = simulate_dipole(net_obj, tstop=tstop, n_trials=n_trials)[0] + return dpl + +error_values_cobyla = list() +def calculate_rmse(params_predicted): + dpl_estimate = sim_dipole(net.copy(), params_predicted) + rmse = np.sqrt( ((dpl_estimate.data['agg'] - dpl_true.data['agg'])**2).sum() / len(dpl_estimate.times) ) / (max(dpl_true.data['agg']) - min(dpl_true.data['agg'])) error_values_cobyla.append(rmse) - print('New mu: ', mu_predicted, ' RMSE: ', rmse) + print('New params: ', params_predicted) + print('NRMSE: ', rmse) return rmse - +# network net = reduced_network() -mu_true = 2 -mu_offset = 4 -dpl_true = sim_dipole(net.copy(), mu_true) -dpl_offset = sim_dipole(net.copy(), mu_offset) -max_iter = 15 +# cell dynamics and synaptic weights +# params_true = {"prox_mu": [10], "dist_mu": [30], +# "prox_ampa": [0.1, 0.4, 0.2, 0.6], "dist_ampa": [0.3, 0.05, 0.7, 0.8]} +# offset parameters +# params_offset = deepcopy(params_true) +# params_offset["prox_mu"][0] += 5 +# params_offset["dist_mu"][0] -= 5 +# params_offset["prox_ampa"][0] += 0.4 +# params_offset["prox_ampa"][2] -= 0.1 +# params_offset["dist_ampa"][1] += 0.3 +# params_offset["dist_ampa"][3] -= 0.5 + +params_true = [10, 30, 0.12, 0.4, 0.2, 0.6, 0.3, 0.13, 0.8] +params_offset = [15, 25, 0.5, 0.4, 0.15, 0.6, 0.3, 0.35, 0.3] + +dpl_true = sim_dipole(net.copy(), params_true) +dpl_offset = sim_dipole(net.copy(), params_offset) + +# constraints +max_iter = 200 +# cons = [(0., 1)] * 4 +cons = [(0.0, 20.), (0.0, 50.), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1)] #----------------------------------------------Bayesian optimization------------------------------------ -bayesian_optimization_results = gp_minimize(calculate_rmse, - [(0.01, 10.)], +with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): + bayesian_optimization_results = gp_minimize(calculate_rmse, + cons, acq_func='EI', n_calls=max_iter, - x0=[mu_offset], + x0=params_offset, random_state=64) - -mu_bayesian = bayesian_optimization_results.x[0] -dpl_bayesian = sim_dipole(net.copy(), mu_bayesian) +params_bayesian = bayesian_optimization_results.x +dpl_bayesian = sim_dipole(net.copy(), params_bayesian) print('Bayesian Optimization 
Results \n-----------------------------') -print('True mu: ', mu_true, '\nOffset mu: ', mu_offset, '\nOptimized mu: ', mu_bayesian) +print('True params: ', params_true, '\nOffset params: ', params_offset, '\nOptimized params: ', params_bayesian) # plot_convergence(bayesian_optimization_results) error_values_bayesian = [np.min(bayesian_optimization_results.func_vals[:i]) for i in range(1, max_iter + 1)] #--------------------------------------------COBYLA optimization--------------------------------------- -cons = list() +# constraints +cons_func = list() x0 = list() -cons.append(lambda x, idx=0: 10. - x[0]) -cons.append(lambda x, ind=0: x[0] - 0.01) -x0.append(mu_offset) +cons_func.append(lambda x, idx=0: 20. - np.asarray(x[0])) +cons_func.append(lambda x, idx=0: np.asarray(x[0]) - 0.01) +cons_func.append(lambda x, idx=1: 50. - np.asarray(x[0])) +cons_func.append(lambda x, idx=1: np.asarray(x[0]) - 0.01) +for idx in range(2,9): + cons_func.append(lambda x, idx=idx: 1. - np.asarray(x[0])) + cons_func.append(lambda x, idx=idx: np.asarray(x[0]) - 0.01) +x0.append(params_offset) + error_values_cobyla.clear() -cobyla_optimization_results = fmin_cobyla(calculate_rmse, - cons=cons, +with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): + cobyla_optimization_results = fmin_cobyla(calculate_rmse, + cons=cons_func, rhobeg=0.1, rhoend=1e-4, x0=x0, @@ -94,41 +144,50 @@ def calculate_rmse(mu_predicted): dpl_cobyla = sim_dipole(net.copy(), cobyla_optimization_results[0]) print('COBYLA Optimization Results \n---------------------------') -print('True mu: ', mu_true, '\nOffset mu: ', mu_offset, '\nOptimized mu: ', cobyla_optimization_results[0]) +print('True params: ', params_true, '\nOffset params: ', params_offset, '\nOptimized params: ', cobyla_optimization_results[0]) + +#--------------------------------------------Current optimization--------------------------------------- +# with MPIBackend(n_procs=2): +# net_opt = optimize_evoked(net, tstop=100, n_trials=1, +# target_dpl=dpl_true, initial_dpl=dpl_offset) +# with MPIBackend(n_procs=2): +# best_dpl = sim_dipole(net_opt, tstop=100, n_trials=1)[0] #--------------------------------------------Plot------------------------------------------------------- + +# fig +fig, axs = plt.subplot_mosaic([['left', 'right'],['bottom', 'bottom']], constrained_layout=False) +ax1 = axs['left'] +ax2 = axs['right'] +ax3 = axs['bottom'] x_axis = list(range(1, max_iter + 1)) -fig, axs = plt.subplots(2, 2, figsize=(10,4)) -ax1 = axs[0, 0] -ax2 = axs[0, 1] -ax3 = axs[1, 0] -ax4 = axs[1, 1] y_max = max([max(error_values_bayesian), max(error_values_cobyla)]) + min([min(error_values_bayesian), min(error_values_cobyla)]) # bayesian -dpl_true.plot(ax=ax1, color='black') -dpl_offset.plot(ax=ax1, color='blue') -dpl_bayesian.plot(ax=ax1, color='orange') +ax1.plot(dpl_true.times, dpl_true.data['agg'], color = 'black') +ax1.plot(dpl_offset.times, dpl_offset.data['agg'], color = 'tab:blue') +ax1.plot(dpl_bayesian.times, dpl_bayesian.data['agg'], color = 'orange') ax1.set_title('Bayesian optimization') -ax1.legend(['true', 'offset', 'opt'], frameon=False) -ax3.plot(x_axis, error_values_bayesian, color='black') -ax3.set_title('Convergence') -ax3.set_xlabel('Number of calls') -ax3.set_ylabel('RMSE') -ax3.set_ylim([0, y_max]) -ax3.text(9, 0.007, 'RMSE: ' + str(round(min(error_values_bayesian), 4)), style ='italic', fontsize = 15, color = 'red') +ax1.set_ylabel('Dipole moment (nAm)') +ax1.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') # COBYLA -dpl_true.plot(ax=ax2, 
color='black') -dpl_offset.plot(ax=ax2, color='blue') -dpl_cobyla.plot(ax=ax2, color='orange') +ax2.plot(dpl_true.times, dpl_true.data['agg'], color = 'black') +ax2.plot(dpl_offset.times, dpl_offset.data['agg'], color = 'tab:blue') +ax2.plot(dpl_cobyla.times, dpl_cobyla.data['agg'], color = 'orange') ax2.set_title('COBYLA optimization') -ax4.plot(x_axis, error_values_cobyla, color='black') -ax4.set_title('Convergence') -ax4.set_xlabel('Number of calls') -ax4.set_ylabel('RMSE') -ax4.set_ylim([0, y_max]) -ax4.text(9, 0.007, 'RMSE: ' + str(round(min(error_values_cobyla), 4)), style = 'italic', fontsize = 15, color = 'red') - -plt.tight_layout() +ax2.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') + +# convergence +ax3.plot(x_axis, error_values_bayesian, color='tab:green') +ax3.plot(error_values_cobyla, color='tab:purple') +# ax3.plot(len(error_values_cobyla), 0, marker = 'o', markersize = 5, markeredgecolor = 'tab:purple', markerfacecolor = 'tab:purple') +ax3.text(125, 0.1, 'BAYESIAN: ' + str(round(min(error_values_bayesian), 4)), style ='italic', fontsize = 15, color = 'tab:green') +ax3.text(125, 0.15, 'COBYLA: ' + str(round(min(error_values_cobyla), 4)), style = 'italic', fontsize = 15, color = 'tab:purple') +ax3.set_ylim([-.01, y_max]) +ax3.set_title('Convergence') +ax3.set_xlabel('Number of calls') +ax3.set_ylabel('NRMSE') +ax3.grid(visible = True) +fig.savefig('/Users/suanypujol/Desktop/development/fig/opt.svg') From 88ac514b0d953f55d2d4023b61e3a81860f73fe3 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 8 Jun 2023 13:39:44 -0400 Subject: [PATCH 03/63] Draft basic Optimizer class --- hnn_core/opt_toy_example/Optimizer.py | 321 ++++++++++++++++++ .../generalized_optimization.py | 193 ----------- 2 files changed, 321 insertions(+), 193 deletions(-) create mode 100644 hnn_core/opt_toy_example/Optimizer.py delete mode 100644 hnn_core/opt_toy_example/generalized_optimization.py diff --git a/hnn_core/opt_toy_example/Optimizer.py b/hnn_core/opt_toy_example/Optimizer.py new file mode 100644 index 000000000..5dd52458a --- /dev/null +++ b/hnn_core/opt_toy_example/Optimizer.py @@ -0,0 +1,321 @@ +import os.path as op +import numpy as np +import matplotlib.pyplot as plt +# import copy.deepcopy as dp + +import hnn_core +from hnn_core import read_params, jones_2009_model, simulate_dipole +from hnn_core.network import pick_connection +from hnn_core import MPIBackend + +from scipy.optimize import fmin_cobyla +from skopt import gp_minimize +from skopt.utils import use_named_args +from skopt.space import Real + +class Optimizer: + + def __init__(self, net, target_dpl, initial_dpl, constraints, res_type, optimizer_type): + self.net = net + self.target_dpl = target_dpl + self.initial_dpl = initial_dpl + self.constraints = constraints + self.res_type = res_type + self.optimizer_type = optimizer_type + self.predicted_params = dict() + self.error_values = list() + self.b_final_iter = False + self.max_iter = 200 + + def _get_drive_params(self, net, drive_names): + """Get drive parameters (dynamics, weights, and static) from a Network instance.""" + + drive_dynamics = list() # [{'mu': x, 'sigma': x, 'numspikes': x}] + drive_syn_weights_ampa = list() # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}] + drive_syn_weights_nmda = list() + drive_static_params = dict() + # {'evprox1': {'numspikes': 1, + # 'location': 'proximal', + # 'n_drive_cells': 'n_cells', + # 'cell_specific': True, + # 'space_constant': {'L2_pyramidal': x, + # 'L2_basket': x, + # 'L5_basket': x, + # 
'L5_pyramidal': x}, + # 'synaptic_delays': {'L5_basket': x, + # 'L5_pyramidal': x, + # 'L2_basket': x, + # 'L2_pyramidal': x}, + # 'probability': {'L5_basket': 1.0, + # 'L5_pyramidal': 1.0, + # 'L2_basket': 1.0, + # 'L2_pyramidal': 1.0}, + # 'event_seed': 2, + # 'conn_seed': 3}} + + for drive_name in drive_names: + drive = net.external_drives[drive_name] + connection_idxs = pick_connection(net, src_gids = drive_name) # connection (the prox input targets 4 sources [16, 17, 18, 19]) + + weights_ampa = dict() + weights_nmda = dict() + delays = dict() + probabilities = dict() + static_params = dict() + + for connection_idx in connection_idxs: + # source is drive + target_type = net.connectivity[connection_idx]['target_type'] + target_receptor = net.connectivity[connection_idx]['receptor'] + connection_weight = net.connectivity[connection_idx]['nc_dict']['A_weight'] # A_delay, A_weight, lamtha, threshold + delay = net.connectivity[connection_idx]['nc_dict']['A_delay'] + space_constant = net.connectivity[connection_idx]['nc_dict']['lamtha'] + probability = net.connectivity[connection_idx]['probability'] + + # add weight + if target_receptor == 'ampa': + weights_ampa.update({f'{target_type}': connection_weight}) + elif target_receptor == 'nmda': + weights_nmda.update({f'{target_type}': connection_weight}) + # add delay + delays.update({f'{target_type}': delay}) + # add probability + probabilities.update({f'{target_type}': probability}) + + drive_dynamics.append(drive['dynamics'].copy()) # dynamics + drive_syn_weights_ampa.append(weights_ampa) # synaptic weights + drive_syn_weights_nmda.append(weights_nmda) + + # static params + static_params.update({'numspikes': drive['dynamics']['numspikes']}) + static_params.update({'location': drive['location']}) + # True : each artificial drive cell has 1-to-1 connection parameters (number of drive cells = number of all available cells that this drive can target) + # False : each artificial drive cell has all-to-all (synchronous input) + if drive['cell_specific']: + static_params.update({'n_drive_cells': 'n_cells'}) + else: + static_params.update({'n_drive_cells': drive['n_drive_cells']}) + static_params.update({'cell_specific': drive['cell_specific']}) + static_params.update({'space_constant': space_constant}) + static_params.update({'synaptic_delays': delays}) + static_params.update({'probability': probabilities}) + static_params.update({'event_seed': drive['event_seed']}) + static_params.update({'conn_seed': drive['conn_seed']}) + drive_static_params.update({drive_name: static_params}) + + return drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params + + def _assemble_predicted_params(self, drive_names, drive_syn_weights_ampa, drive_syn_weights_nmda, **params): + """Assembles constraints. 
+ + Returns + ------- + predicted_params : a dictionary () + {'evprox1': {'mu': x, + 'sigma': x, + 'ampa_weights': {'L2_pyramidal': x, + 'L2_basket': x, + 'L5_basket': x, + 'L5_pyramidal': x}, + 'nmda_weights': {'L2_pyramidal': x, + 'L2_basket': x, + 'L5_basket': x, + 'L5_pyramidal': x} + } + + """ + + self.predicted_params = dict() + _params = dict() + + for drive_idx, drive_name in enumerate(drive_names): + _params.update({'mu': params[f'{drive_name}_mu']}) + _params.update({'sigma': params[f'{drive_name}_sigma']}) + + ampa_weights = dict() + nmda_weights = dict() + for target_key in drive_syn_weights_ampa[drive_idx]: # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}] + ampa_weights.update({target_key: params[f'{drive_name}_ampa_{target_key}']}) + for target_key in drive_syn_weights_nmda[drive_idx]: + nmda_weights.update({target_key: params[f'{drive_name}_nmda_{target_key}']}) + + _params.update({'ampa_weights': ampa_weights}) + _params.update({'nmda_weights': nmda_weights}) + self.predicted_params.update({drive_name: _params}) + + return self.predicted_params + + def _simulate_dipole(self, **params): + """This function runs a simulation.""" + tstop = 100. + n_trials = 1 + + net = self.net.copy() + + # get current dynamics, weights, and static params + drive_names = [key for key in net.external_drives.keys() if net.external_drives[key]['type'] == 'evoked'] + drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params = self._get_drive_params(net, drive_names) + + if self.b_final_iter: # final params + predicted_params = params + else: + # assemble predicted params + predicted_params = self._assemble_predicted_params(drive_names, drive_syn_weights_ampa, drive_syn_weights_nmda, **params) + + # remove existing drives, re-add them with updated params + for idx, drive_name in enumerate(drive_names): + # clear drive + del net.external_drives[drive_name] + # clear connectivity + conn_idxs = pick_connection(net, src_gids = drive_name) + net.connectivity = [conn for conn_idx, conn in enumerate(net.connectivity) if conn_idx not in conn_idxs] + + net.add_evoked_drive(name = drive_name, + mu = predicted_params[drive_name]['mu'], + sigma = predicted_params[drive_name]['sigma'], + numspikes = drive_dynamics[idx]['numspikes'], + location = drive_static_params[drive_name]['location'], + n_drive_cells = drive_static_params[drive_name]['n_drive_cells'], + cell_specific = drive_static_params[drive_name]['cell_specific'], + weights_ampa = predicted_params[drive_name]['ampa_weights'], + weights_nmda = predicted_params[drive_name]['nmda_weights'], + space_constant = drive_static_params[drive_name]['space_constant'], + synaptic_delays = drive_static_params[drive_name]['synaptic_delays'], + probability = drive_static_params[drive_name]['probability'], + event_seed = drive_static_params[drive_name]['event_seed'], + conn_seed = drive_static_params[drive_name]['conn_seed']) + + dpl = simulate_dipole(net, tstop=tstop, n_trials=n_trials)[0] + return dpl + + def _objective_function(self, **params): + """Objective function depends on response being optimized, whether evoked or rhythmic.""" + + error_values = list() + + # Normalized RMSE + if self.res_type == 'evoked': + + dpl_estimate = self._simulate_dipole(**params) + nrmse = np.sqrt( ((dpl_estimate.data['agg'] - self.target_dpl.data['agg'])**2).sum() / len(dpl_estimate.times) ) / (max(self.target_dpl.data['agg']) - min(self.target_dpl.data['agg'])) + error_values.append(nrmse) + return nrmse + + # ... 
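+        # The rhythmic branch below is still a stub. A minimal sketch of a
+        # spectral objective (an assumption, not part of this draft) could
+        # compare power spectra instead of time courses, e.g.:
+        #     from scipy.signal import periodogram
+        #     dt = self.target_dpl.times[1] - self.target_dpl.times[0]
+        #     _, psd_est = periodogram(dpl_estimate.data['agg'], fs=1000. / dt)
+        #     _, psd_target = periodogram(self.target_dpl.data['agg'], fs=1000. / dt)
+        #     return np.sqrt(np.mean((psd_est - psd_target) ** 2))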
+        elif self.res_type == 'rhythmic':
+            return
+
+    def _assemble_inputs(self, net):
+        """Assembles constraints & initial parameters for the appropriate optimizer.
+
+        Returns
+        -------
+        constraints : a (lower_bound, upper_bound, "prior") tuple (if Bayesian Optimizer)
+
+            constraints = [Real(name='evprox1_mu', low=5, high=10),
+                           Real(name='evprox1_sigma', low=0.01, high=1),
+                           Real(name='evprox1_nmda_weights_target', low=0.01, high=1)]
+
+
+        initial_params : a list of single initial input points (if Bayesian Optimizer)
+
+            initial_params = [mu, sigma, ampa, nmda, ...]
+        """
+
+        cons = list()
+        initial_params = list()
+
+        # get drive names
+        drive_names = [key for key in net.external_drives.keys() if net.external_drives[key]['type'] == 'evoked'] # ... rhythmic
+        # get current drive params
+        drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params = self._get_drive_params(net, drive_names)
+
+        #--------------------------------------------------Bayesian---------------------------------------------------
+        if self.optimizer_type == 'bayesian':
+            raw_constraints = self.constraints # {'evprox1': {'mu': [5, 10], 'sigma': [1, 5], 'weights': [0.01, 1]}}
+
+            for drive_idx, drive_name in enumerate(drive_names):
+                for key in raw_constraints[drive_name]: # key is 'mu', 'sigma', 'weights' (in order)
+
+                    # add a cons for each target type
+                    if key == 'weights':
+                        for target_key in drive_syn_weights_ampa[drive_idx]: # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}]
+                            cons.append(Real(name=f'{drive_name}_ampa_{target_key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1]))
+                            initial_params.append(drive_syn_weights_ampa[drive_idx][target_key])
+
+                        for target_key in drive_syn_weights_nmda[drive_idx]:
+                            cons.append(Real(name=f'{drive_name}_nmda_{target_key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1]))
+                            initial_params.append(drive_syn_weights_nmda[drive_idx][target_key])
+                    else:
+                        cons.append(Real(name=f'{drive_name}_{key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1]))
+                        initial_params.append(drive_dynamics[drive_idx][key])
+
+            return cons, initial_params
+
+        #--------------------------------------------------COBYLA---------------------------------------------------
+        elif self.optimizer_type == 'cobyla':
+            return
+
+    def optimize_response(self):
+        """Optimize drives to generate evoked or rhythmic response.
+ + Parameters + ---------- + constraints : {'evprox1': {'mu': [min, max], 'sigma': [min, max], 'weights': [min, max]}} + """ + + net = self.net.copy() + optimization_results = list() + # assemble constraints & initial_params + cons, initial_params = self._assemble_inputs(net) + + #--------------------------------------------------Bayesian--------------------------------------------------- + if self.optimizer_type == 'bayesian': + + @use_named_args(dimensions = cons) + def _obj_func(**params): + return self._objective_function(**params) + + optimization_results = gp_minimize(func = _obj_func, + dimensions = cons, + acq_func = 'EI', + n_calls = self.max_iter, + x0 = initial_params, + random_state = 64) + # params_opt = optimization_results.x + self.b_final_iter = True + self.opt_dpl = self._simulate_dipole(**self.predicted_params) + self.error_values = [np.min(optimization_results.func_vals[:i]) for i in range(1, self.max_iter + 1)] + return self.predicted_params, self.opt_dpl, self.error_values + + #--------------------------------------------------COBYLA--------------------------------------------------- + elif self.optimizer_type == 'cobyla': + return + + def plot_results(self): + """Plots target, initial, and optimized dipoles. Plots convergence.""" + # fig + fig, axs = plt.subplot_mosaic([['left'],['right']], constrained_layout = True, figsize = (8, 5)) + ax1 = axs['left'] + ax2 = axs['right'] + x_axis = list(range(1, self.max_iter + 1)) + y_max = max(self.error_values) + 0.01 + + # bayesian + ax1.plot(self.target_dpl.times, self.target_dpl.data['agg'], color = 'black') + ax1.plot(self.initial_dpl.times, self.initial_dpl.data['agg'], color = 'tab:blue') + ax1.plot(self.opt_dpl.times, self.opt_dpl.data['agg'], color = 'orange') + ax1.set_title('Dipoles') + ax1.set_ylabel('Dipole moment (nAm)') + ax1.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') + + # convergence + ax2.plot(x_axis, self.error_values, color='tab:green') + ax2.set_ylim([-.01, y_max]) + ax2.set_title('Convergence') + ax2.set_xlabel('Number of calls') + ax2.set_ylabel('NRMSE') + ax2.grid(visible = True) + + fig.savefig('opt_bayesian.svg') + diff --git a/hnn_core/opt_toy_example/generalized_optimization.py b/hnn_core/opt_toy_example/generalized_optimization.py deleted file mode 100644 index 307a84ade..000000000 --- a/hnn_core/opt_toy_example/generalized_optimization.py +++ /dev/null @@ -1,193 +0,0 @@ -# Author: -import os.path as op -import numpy as np -import matplotlib.pyplot as plt -from copy import deepcopy - -import hnn_core -from hnn_core import read_params, jones_2009_model, simulate_dipole -# from hnn_core import JoblibBackend -from hnn_core import MPIBackend - -from scipy.optimize import fmin_cobyla -from skopt import gp_minimize -# from skopt.plots import plot_convergence, plot_objective -# from hnn_core.optimization import optimize_evoked - -def reduced_network(): - # load default params - hnn_core_root = op.dirname(hnn_core.__file__) - params_fname = op.join(hnn_core_root, 'param', 'default.json') - params = read_params(params_fname) - - # update params - params.update({'N_pyr_x': 3, 'N_pyr_y': 3}) - net_obj = jones_2009_model(params, add_drives_from_params=True) - net_obj.clear_drives() - return net_obj - -def sim_dipole(net_obj, params): - tstop = 100. 
- n_trials = 1 - - # prox input - synaptic_delays_prox = {'L2_basket': 0.1, - 'L2_pyramidal': 0.1, - 'L5_basket': 1., - 'L5_pyramidal': 1.} - - weights_ampa_prox = {'L2_basket': params[2], - 'L2_pyramidal': params[3], - 'L5_basket': params[4], - 'L5_pyramidal': params[5]} - - net_obj.add_evoked_drive('evprox1', - mu=params[0], - sigma=2, - numspikes=1, - location='proximal', - weights_ampa=weights_ampa_prox, - synaptic_delays=synaptic_delays_prox) - - # dist input - synaptic_delays_dist = {'L2_basket': 0.1, - 'L2_pyramidal': 0.1, - 'L5_pyramidal': 1.} - - weights_ampa_dist = {'L2_basket': params[6], - 'L2_pyramidal': params[7], - 'L5_pyramidal': params[8]} - - net_obj.add_evoked_drive('evdist1', - mu=params[1], - sigma=2, - numspikes=1, - location='distal', - weights_ampa=weights_ampa_dist, - synaptic_delays=synaptic_delays_dist) - with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): - dpl = simulate_dipole(net_obj, tstop=tstop, n_trials=n_trials)[0] - return dpl - -error_values_cobyla = list() -def calculate_rmse(params_predicted): - dpl_estimate = sim_dipole(net.copy(), params_predicted) - rmse = np.sqrt( ((dpl_estimate.data['agg'] - dpl_true.data['agg'])**2).sum() / len(dpl_estimate.times) ) / (max(dpl_true.data['agg']) - min(dpl_true.data['agg'])) - error_values_cobyla.append(rmse) - print('New params: ', params_predicted) - print('NRMSE: ', rmse) - return rmse - -# network -net = reduced_network() - -# cell dynamics and synaptic weights -# params_true = {"prox_mu": [10], "dist_mu": [30], -# "prox_ampa": [0.1, 0.4, 0.2, 0.6], "dist_ampa": [0.3, 0.05, 0.7, 0.8]} -# offset parameters -# params_offset = deepcopy(params_true) -# params_offset["prox_mu"][0] += 5 -# params_offset["dist_mu"][0] -= 5 -# params_offset["prox_ampa"][0] += 0.4 -# params_offset["prox_ampa"][2] -= 0.1 -# params_offset["dist_ampa"][1] += 0.3 -# params_offset["dist_ampa"][3] -= 0.5 - -params_true = [10, 30, 0.12, 0.4, 0.2, 0.6, 0.3, 0.13, 0.8] -params_offset = [15, 25, 0.5, 0.4, 0.15, 0.6, 0.3, 0.35, 0.3] - -dpl_true = sim_dipole(net.copy(), params_true) -dpl_offset = sim_dipole(net.copy(), params_offset) - -# constraints -max_iter = 200 -# cons = [(0., 1)] * 4 -cons = [(0.0, 20.), (0.0, 50.), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1), (0.0, 1)] -#----------------------------------------------Bayesian optimization------------------------------------ -with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): - bayesian_optimization_results = gp_minimize(calculate_rmse, - cons, - acq_func='EI', - n_calls=max_iter, - x0=params_offset, - random_state=64) -params_bayesian = bayesian_optimization_results.x -dpl_bayesian = sim_dipole(net.copy(), params_bayesian) - -print('Bayesian Optimization Results \n-----------------------------') -print('True params: ', params_true, '\nOffset params: ', params_offset, '\nOptimized params: ', params_bayesian) -# plot_convergence(bayesian_optimization_results) -error_values_bayesian = [np.min(bayesian_optimization_results.func_vals[:i]) for i in range(1, max_iter + 1)] - -#--------------------------------------------COBYLA optimization--------------------------------------- -# constraints -cons_func = list() -x0 = list() -cons_func.append(lambda x, idx=0: 20. - np.asarray(x[0])) -cons_func.append(lambda x, idx=0: np.asarray(x[0]) - 0.01) -cons_func.append(lambda x, idx=1: 50. - np.asarray(x[0])) -cons_func.append(lambda x, idx=1: np.asarray(x[0]) - 0.01) -for idx in range(2,9): - cons_func.append(lambda x, idx=idx: 1. 
- np.asarray(x[0])) - cons_func.append(lambda x, idx=idx: np.asarray(x[0]) - 0.01) -x0.append(params_offset) - -error_values_cobyla.clear() -with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): - cobyla_optimization_results = fmin_cobyla(calculate_rmse, - cons=cons_func, - rhobeg=0.1, - rhoend=1e-4, - x0=x0, - maxfun=max_iter, - catol=0.0) -dpl_cobyla = sim_dipole(net.copy(), cobyla_optimization_results[0]) - -print('COBYLA Optimization Results \n---------------------------') -print('True params: ', params_true, '\nOffset params: ', params_offset, '\nOptimized params: ', cobyla_optimization_results[0]) - -#--------------------------------------------Current optimization--------------------------------------- -# with MPIBackend(n_procs=2): -# net_opt = optimize_evoked(net, tstop=100, n_trials=1, -# target_dpl=dpl_true, initial_dpl=dpl_offset) -# with MPIBackend(n_procs=2): -# best_dpl = sim_dipole(net_opt, tstop=100, n_trials=1)[0] - -#--------------------------------------------Plot------------------------------------------------------- - -# fig -fig, axs = plt.subplot_mosaic([['left', 'right'],['bottom', 'bottom']], constrained_layout=False) -ax1 = axs['left'] -ax2 = axs['right'] -ax3 = axs['bottom'] -x_axis = list(range(1, max_iter + 1)) -y_max = max([max(error_values_bayesian), max(error_values_cobyla)]) + min([min(error_values_bayesian), min(error_values_cobyla)]) - -# bayesian -ax1.plot(dpl_true.times, dpl_true.data['agg'], color = 'black') -ax1.plot(dpl_offset.times, dpl_offset.data['agg'], color = 'tab:blue') -ax1.plot(dpl_bayesian.times, dpl_bayesian.data['agg'], color = 'orange') -ax1.set_title('Bayesian optimization') -ax1.set_ylabel('Dipole moment (nAm)') -ax1.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') - -# COBYLA -ax2.plot(dpl_true.times, dpl_true.data['agg'], color = 'black') -ax2.plot(dpl_offset.times, dpl_offset.data['agg'], color = 'tab:blue') -ax2.plot(dpl_cobyla.times, dpl_cobyla.data['agg'], color = 'orange') -ax2.set_title('COBYLA optimization') -ax2.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') - -# convergence -ax3.plot(x_axis, error_values_bayesian, color='tab:green') -ax3.plot(error_values_cobyla, color='tab:purple') -# ax3.plot(len(error_values_cobyla), 0, marker = 'o', markersize = 5, markeredgecolor = 'tab:purple', markerfacecolor = 'tab:purple') -ax3.text(125, 0.1, 'BAYESIAN: ' + str(round(min(error_values_bayesian), 4)), style ='italic', fontsize = 15, color = 'tab:green') -ax3.text(125, 0.15, 'COBYLA: ' + str(round(min(error_values_cobyla), 4)), style = 'italic', fontsize = 15, color = 'tab:purple') -ax3.set_ylim([-.01, y_max]) -ax3.set_title('Convergence') -ax3.set_xlabel('Number of calls') -ax3.set_ylabel('NRMSE') -ax3.grid(visible = True) - -fig.savefig('/Users/suanypujol/Desktop/development/fig/opt.svg') From 203e95ce80c50abeaf0ecf9ea1c44266f3b88262 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 15 Jun 2023 18:24:57 +0200 Subject: [PATCH 04/63] Draft opt class and functions based on comments --- hnn_core/opt_toy_example/Optimizer.py | 321 ------------------------ hnn_core/opt_toy_example/general.py | 344 ++++++++++++++++++++++++++ 2 files changed, 344 insertions(+), 321 deletions(-) delete mode 100644 hnn_core/opt_toy_example/Optimizer.py create mode 100644 hnn_core/opt_toy_example/general.py diff --git a/hnn_core/opt_toy_example/Optimizer.py b/hnn_core/opt_toy_example/Optimizer.py deleted file mode 100644 index 5dd52458a..000000000 --- a/hnn_core/opt_toy_example/Optimizer.py +++ /dev/null @@ 
-1,321 +0,0 @@ -import os.path as op -import numpy as np -import matplotlib.pyplot as plt -# import copy.deepcopy as dp - -import hnn_core -from hnn_core import read_params, jones_2009_model, simulate_dipole -from hnn_core.network import pick_connection -from hnn_core import MPIBackend - -from scipy.optimize import fmin_cobyla -from skopt import gp_minimize -from skopt.utils import use_named_args -from skopt.space import Real - -class Optimizer: - - def __init__(self, net, target_dpl, initial_dpl, constraints, res_type, optimizer_type): - self.net = net - self.target_dpl = target_dpl - self.initial_dpl = initial_dpl - self.constraints = constraints - self.res_type = res_type - self.optimizer_type = optimizer_type - self.predicted_params = dict() - self.error_values = list() - self.b_final_iter = False - self.max_iter = 200 - - def _get_drive_params(self, net, drive_names): - """Get drive parameters (dynamics, weights, and static) from a Network instance.""" - - drive_dynamics = list() # [{'mu': x, 'sigma': x, 'numspikes': x}] - drive_syn_weights_ampa = list() # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}] - drive_syn_weights_nmda = list() - drive_static_params = dict() - # {'evprox1': {'numspikes': 1, - # 'location': 'proximal', - # 'n_drive_cells': 'n_cells', - # 'cell_specific': True, - # 'space_constant': {'L2_pyramidal': x, - # 'L2_basket': x, - # 'L5_basket': x, - # 'L5_pyramidal': x}, - # 'synaptic_delays': {'L5_basket': x, - # 'L5_pyramidal': x, - # 'L2_basket': x, - # 'L2_pyramidal': x}, - # 'probability': {'L5_basket': 1.0, - # 'L5_pyramidal': 1.0, - # 'L2_basket': 1.0, - # 'L2_pyramidal': 1.0}, - # 'event_seed': 2, - # 'conn_seed': 3}} - - for drive_name in drive_names: - drive = net.external_drives[drive_name] - connection_idxs = pick_connection(net, src_gids = drive_name) # connection (the prox input targets 4 sources [16, 17, 18, 19]) - - weights_ampa = dict() - weights_nmda = dict() - delays = dict() - probabilities = dict() - static_params = dict() - - for connection_idx in connection_idxs: - # source is drive - target_type = net.connectivity[connection_idx]['target_type'] - target_receptor = net.connectivity[connection_idx]['receptor'] - connection_weight = net.connectivity[connection_idx]['nc_dict']['A_weight'] # A_delay, A_weight, lamtha, threshold - delay = net.connectivity[connection_idx]['nc_dict']['A_delay'] - space_constant = net.connectivity[connection_idx]['nc_dict']['lamtha'] - probability = net.connectivity[connection_idx]['probability'] - - # add weight - if target_receptor == 'ampa': - weights_ampa.update({f'{target_type}': connection_weight}) - elif target_receptor == 'nmda': - weights_nmda.update({f'{target_type}': connection_weight}) - # add delay - delays.update({f'{target_type}': delay}) - # add probability - probabilities.update({f'{target_type}': probability}) - - drive_dynamics.append(drive['dynamics'].copy()) # dynamics - drive_syn_weights_ampa.append(weights_ampa) # synaptic weights - drive_syn_weights_nmda.append(weights_nmda) - - # static params - static_params.update({'numspikes': drive['dynamics']['numspikes']}) - static_params.update({'location': drive['location']}) - # True : each artificial drive cell has 1-to-1 connection parameters (number of drive cells = number of all available cells that this drive can target) - # False : each artificial drive cell has all-to-all (synchronous input) - if drive['cell_specific']: - static_params.update({'n_drive_cells': 'n_cells'}) - else: - static_params.update({'n_drive_cells': 
drive['n_drive_cells']}) - static_params.update({'cell_specific': drive['cell_specific']}) - static_params.update({'space_constant': space_constant}) - static_params.update({'synaptic_delays': delays}) - static_params.update({'probability': probabilities}) - static_params.update({'event_seed': drive['event_seed']}) - static_params.update({'conn_seed': drive['conn_seed']}) - drive_static_params.update({drive_name: static_params}) - - return drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params - - def _assemble_predicted_params(self, drive_names, drive_syn_weights_ampa, drive_syn_weights_nmda, **params): - """Assembles constraints. - - Returns - ------- - predicted_params : a dictionary () - {'evprox1': {'mu': x, - 'sigma': x, - 'ampa_weights': {'L2_pyramidal': x, - 'L2_basket': x, - 'L5_basket': x, - 'L5_pyramidal': x}, - 'nmda_weights': {'L2_pyramidal': x, - 'L2_basket': x, - 'L5_basket': x, - 'L5_pyramidal': x} - } - - """ - - self.predicted_params = dict() - _params = dict() - - for drive_idx, drive_name in enumerate(drive_names): - _params.update({'mu': params[f'{drive_name}_mu']}) - _params.update({'sigma': params[f'{drive_name}_sigma']}) - - ampa_weights = dict() - nmda_weights = dict() - for target_key in drive_syn_weights_ampa[drive_idx]: # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}] - ampa_weights.update({target_key: params[f'{drive_name}_ampa_{target_key}']}) - for target_key in drive_syn_weights_nmda[drive_idx]: - nmda_weights.update({target_key: params[f'{drive_name}_nmda_{target_key}']}) - - _params.update({'ampa_weights': ampa_weights}) - _params.update({'nmda_weights': nmda_weights}) - self.predicted_params.update({drive_name: _params}) - - return self.predicted_params - - def _simulate_dipole(self, **params): - """This function runs a simulation.""" - tstop = 100. 
- n_trials = 1 - - net = self.net.copy() - - # get current dynamics, weights, and static params - drive_names = [key for key in net.external_drives.keys() if net.external_drives[key]['type'] == 'evoked'] - drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params = self._get_drive_params(net, drive_names) - - if self.b_final_iter: # final params - predicted_params = params - else: - # assemble predicted params - predicted_params = self._assemble_predicted_params(drive_names, drive_syn_weights_ampa, drive_syn_weights_nmda, **params) - - # remove existing drives, re-add them with updated params - for idx, drive_name in enumerate(drive_names): - # clear drive - del net.external_drives[drive_name] - # clear connectivity - conn_idxs = pick_connection(net, src_gids = drive_name) - net.connectivity = [conn for conn_idx, conn in enumerate(net.connectivity) if conn_idx not in conn_idxs] - - net.add_evoked_drive(name = drive_name, - mu = predicted_params[drive_name]['mu'], - sigma = predicted_params[drive_name]['sigma'], - numspikes = drive_dynamics[idx]['numspikes'], - location = drive_static_params[drive_name]['location'], - n_drive_cells = drive_static_params[drive_name]['n_drive_cells'], - cell_specific = drive_static_params[drive_name]['cell_specific'], - weights_ampa = predicted_params[drive_name]['ampa_weights'], - weights_nmda = predicted_params[drive_name]['nmda_weights'], - space_constant = drive_static_params[drive_name]['space_constant'], - synaptic_delays = drive_static_params[drive_name]['synaptic_delays'], - probability = drive_static_params[drive_name]['probability'], - event_seed = drive_static_params[drive_name]['event_seed'], - conn_seed = drive_static_params[drive_name]['conn_seed']) - - dpl = simulate_dipole(net, tstop=tstop, n_trials=n_trials)[0] - return dpl - - def _objective_function(self, **params): - """Objective function depends on response being optimized, whether evoked or rhythmic.""" - - error_values = list() - - # Normalized RMSE - if self.res_type == 'evoked': - - dpl_estimate = self._simulate_dipole(**params) - nrmse = np.sqrt( ((dpl_estimate.data['agg'] - self.target_dpl.data['agg'])**2).sum() / len(dpl_estimate.times) ) / (max(self.target_dpl.data['agg']) - min(self.target_dpl.data['agg'])) - error_values.append(nrmse) - return nrmse - - # ... - elif self.res_type == 'rhythmic': - return - - def _assemble_inputs(self, net): - """Assembles constraints & initial parameters for appropriate optimizer. - - Retruns - ------- - constraints : a (lower_bound, upper_bound, "prior") tuple (if Bayesian Optimizer) - - constraints = [Real(name='evprox1_mu', low=5, high=10), - Real(name='evprox1_sigma', low=0.01, high=1), - Real(name='evprox1_nmda_weights_target', low=0.01, high=1)] - - - initial_params : a list of single initial input points (if Bayesian Optimizer) - - initial_params = [mu, sigma, ampa, nmda, ...] - """ - - cons = list() - initial_params = list() - - # get drive names - drive_names = [key for key in net.external_drives.keys() if net.external_drives[key]['type'] == 'evoked'] # ... 
rhythmic - # get current drive params - drive_dynamics, drive_syn_weights_ampa, drive_syn_weights_nmda, drive_static_params = self._get_drive_params(net, drive_names) - - #--------------------------------------------------Bayesian--------------------------------------------------- - if self.optimizer_type == 'bayesian': - raw_constraints = self.constraints # {'evprox1': {'mu': [5, 10], 'sigma': [1, 5], 'weights': [0.01, 1]}} - - for drive_idx, drive_name in enumerate(drive_names): - for key in raw_constraints[drive_name]: # key is 'mu', 'sigma', 'weights' (in order) - - # add a cons for each target type - if key == 'weights': - for target_key in drive_syn_weights_ampa[drive_idx]: # [{'L2_pyramidal': x, 'L2_basket': x, 'L5_pyramidal': x}] - cons.append(Real(name=f'{drive_name}_ampa_{target_key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1])) - initial_params.append(drive_syn_weights_ampa[drive_idx][target_key]) - - for target_key in drive_syn_weights_nmda[drive_idx]: - cons.append(Real(name=f'{drive_name}_nmda_{target_key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1])) - initial_params.append(drive_syn_weights_nmda[drive_idx][target_key]) - else: - cons.append(Real(name=f'{drive_name}_{key}', low = raw_constraints[drive_name][key][0], high = raw_constraints[drive_name][key][1])) - initial_params.append(drive_dynamics[drive_idx][key]) - - return cons, initial_params - - #--------------------------------------------------COBYLA--------------------------------------------------- - elif self.optimizer_type == 'cobyla': - return - - def optimize_response(self): - """Optimize drives to generate evoked or rhythmic response. - - Parameters - ---------- - constraints : {'evprox1': {'mu': [min, max], 'sigma': [min, max], 'weights': [min, max]}} - """ - - net = self.net.copy() - optimization_results = list() - # assemble constraints & initial_params - cons, initial_params = self._assemble_inputs(net) - - #--------------------------------------------------Bayesian--------------------------------------------------- - if self.optimizer_type == 'bayesian': - - @use_named_args(dimensions = cons) - def _obj_func(**params): - return self._objective_function(**params) - - optimization_results = gp_minimize(func = _obj_func, - dimensions = cons, - acq_func = 'EI', - n_calls = self.max_iter, - x0 = initial_params, - random_state = 64) - # params_opt = optimization_results.x - self.b_final_iter = True - self.opt_dpl = self._simulate_dipole(**self.predicted_params) - self.error_values = [np.min(optimization_results.func_vals[:i]) for i in range(1, self.max_iter + 1)] - return self.predicted_params, self.opt_dpl, self.error_values - - #--------------------------------------------------COBYLA--------------------------------------------------- - elif self.optimizer_type == 'cobyla': - return - - def plot_results(self): - """Plots target, initial, and optimized dipoles. 
Plots convergence.""" - # fig - fig, axs = plt.subplot_mosaic([['left'],['right']], constrained_layout = True, figsize = (8, 5)) - ax1 = axs['left'] - ax2 = axs['right'] - x_axis = list(range(1, self.max_iter + 1)) - y_max = max(self.error_values) + 0.01 - - # bayesian - ax1.plot(self.target_dpl.times, self.target_dpl.data['agg'], color = 'black') - ax1.plot(self.initial_dpl.times, self.initial_dpl.data['agg'], color = 'tab:blue') - ax1.plot(self.opt_dpl.times, self.opt_dpl.data['agg'], color = 'orange') - ax1.set_title('Dipoles') - ax1.set_ylabel('Dipole moment (nAm)') - ax1.legend(['true', 'offset', 'opt'], frameon=False, loc = 'upper right') - - # convergence - ax2.plot(x_axis, self.error_values, color='tab:green') - ax2.set_ylim([-.01, y_max]) - ax2.set_title('Convergence') - ax2.set_xlabel('Number of calls') - ax2.set_ylabel('NRMSE') - ax2.grid(visible = True) - - fig.savefig('opt_bayesian.svg') - diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py new file mode 100644 index 000000000..c6af5dccf --- /dev/null +++ b/hnn_core/opt_toy_example/general.py @@ -0,0 +1,344 @@ +import numpy as np +import matplotlib.pyplot as plt +from collections import namedtuple + +from hnn_core import read_params, jones_2009_model, simulate_dipole +from hnn_core.network import pick_connection + +from skopt import gp_minimize +from skopt.utils import use_named_args +from skopt.space import Real + +from scipy.optimize import fmin_cobyla + +class Optimizer: + def __init__(self, net, constraints, solver, metric): + self.net = net + self.constraints = constraints + if solver == 'bayesian': + self._get_params = _get_params_bayesian + self._run_opt = _run_opt_bayesian + elif solver == 'cobyla': + self._get_params = _get_params_cobyla + self._run_opt = _run_opt_cobyla + if metric == 'evoked': + self.metric = _rmse_evoked + elif metric == 'rhythmic': + self.metric = _rmse_rhythmic + self.net_ = None + self.error_values = None + self.opt_params = None + + def fit(self, target_statistic, window_len = None): + """ ... + Parameters + ---------- + target_statistic : dpl + window_len : for smoothing dpl + """ + + init_params, cons = self._get_params(self.net, self.constraints) + opt_params, error_values = self._run_opt(self.net, init_params, cons, self.metric, target_statistic, window_len) + + self.opt_params = opt_params + self.error_values = error_values + # self.net_ = _set_params(opt_params) + return + + def plot_convergence(): + return + + def plot_param_search(): + return + +# gets called only once +def _get_params_bayesian(net, constraints): + """Assembles constraints & initial parameters as required by gp_minimize. 
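+
+    Each parameter to be optimized is exposed to skopt as a named ``Real``
+    dimension; names follow the pattern ``{drive_name}_{param}`` so that the
+    optimizer's output can be mapped back onto the network.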
+
+    Parameters
+    ----------
+    net : the Network object
+    constraints : the user-defined constraints
+
+    Returns
+    -------
+    init_params : list
+        [{drive_name}_mu, {drive_name}_sigma, {drive_name}_ampa_target, {drive_name}_nmda_target]
+    cons : list
+        [Real(name='{drive_name}_mu', low=x, high=x),
+         Real(name='{drive_name}_sigma', low=x, high=x),
+         Real(name='{drive_name}_ampa_target', low=x, high=x),
+         Real(name='{drive_name}_nmda_target', low=x, high=x)]
+    """
+
+    init_params = list()
+    cons = list()
+    for drive_name in net.external_drives.keys():
+        for cons_key in constraints[drive_name]:
+            if cons_key == 'mu':
+                cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1]))
+                init_params.append(net.external_drives[drive_name]['dynamics']['mu'])
+            elif cons_key == 'sigma':
+                cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1]))
+                init_params.append(net.external_drives[drive_name]['dynamics']['sigma'])
+            elif cons_key == 'weights':
+                conn_idxs = pick_connection(net, src_gids = drive_name)
+                for conn_idx in conn_idxs:
+                    target_type = net.connectivity[conn_idx]['target_type']
+                    target_receptor = net.connectivity[conn_idx]['receptor']
+                    cons.append(Real(name=f'{drive_name}_{target_receptor}_{target_type}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1]))
+                    init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight'])
+    return init_params, cons
+
+# gets called only once
+def _get_params_cobyla(net, constraints):
+    """Assembles constraints & initial parameters as required by fmin_cobyla.
+
+    Returns
+    -------
+    init_params : list
+    cons : list
+    """
+
+    init_params = list()
+    cons = list()
+
+    for drive_name in net.external_drives.keys():
+        for cons_key in constraints[drive_name]:
+            if cons_key == 'mu' or cons_key == 'sigma':
+                # bind the flat-vector index and the bounds as default
+                # arguments so each lambda keeps its own values; fmin_cobyla
+                # expects constraint functions that are >= 0 when satisfied,
+                # so the lower bound is x[idx] - low (not low - x[idx])
+                idx = len(init_params)
+                low, high = constraints[drive_name][cons_key]
+                cons.append(lambda x, idx = idx, high = high: high - x[idx])
+                cons.append(lambda x, idx = idx, low = low: x[idx] - low)
+                init_params.append(net.external_drives[drive_name]['dynamics'][cons_key])
+            elif cons_key == 'weights':
+                conn_idxs = pick_connection(net, src_gids = drive_name)
+                for conn_idx in conn_idxs:
+                    idx = len(init_params)
+                    low, high = constraints[drive_name][cons_key]
+                    cons.append(lambda x, idx = idx, high = high: high - x[idx])
+                    cons.append(lambda x, idx = idx, low = low: x[idx] - low)
+                    init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight'])
+    return init_params, cons
+
+def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, window_len):
+    max_iter = 11
+    @use_named_args(dimensions = cons)
+    def _obj_func(**params):
+        return metric(net, target_statistic, window_len, **params)
+
+    opt_results = gp_minimize(func = _obj_func,
+                              dimensions = cons,
+                              acq_func = 'EI',
+                              n_calls = max_iter,
+                              x0 = init_params,
+                              random_state = 64)
+    opt_params = opt_results.x
+    # get net_
+    # ...
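+    # A minimal sketch for rebuilding the optimized network from the named
+    # dimensions (assumes the helper functions defined below in this module):
+    #     named = {dim.name: value for dim, value in zip(cons, opt_params)}
+    #     fixed_params = _get_fixed_params(net)
+    #     net_ = _set_params(net, fixed_params,
+    #                        _get_predicted_params(net, **named))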
+ error_values = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + return opt_params, error_values + +def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, window_len): + max_iter = 11 + def _obj_func(params_cobyla): + return metric(net = net, target_statistic = target_statistic, window_len = window_len, params_cobyla = params_cobyla) + + opt_results = fmin_cobyla(_obj_func, + cons = cons, + rhobeg = 0.1, + rhoend = 1e-4, + x0 = init_params, + maxfun = max_iter, + catol = 0.0) + opt_params = opt_results + # get net_ + # ... + error_values = list() + return opt_params, error_values + +def _get_fixed_params(net): + """Gets fixed params (we need this function bc we have to remove and reset each drive). + + Returns + ------- + {'drive_name': {'numspikes': x, + 'location': 'proximal', + 'n_drive_cells': 'n_cells', + 'cell_specific': True, + 'space_constant': {'L2_pyramidal': x, + 'L2_basket': x, + 'L5_basket': x, + 'L5_pyramidal': x}, + 'synaptic_delays': {'L5_basket': x, + 'L5_pyramidal': x, + 'L2_basket': x, + 'L2_pyramidal': x}, + 'probability': {'L5_basket': x, + 'L5_pyramidal': x, + 'L2_basket': x, + 'L2_pyramidal': x}, + 'event_seed': x, + 'conn_seed': x}} + """ + + fixed_params = dict() + for drive_name in net.external_drives.keys(): + drive = net.external_drives[drive_name] + conn_idxs = pick_connection(net, src_gids = drive_name) + delays = dict() + probabilities = dict() + conn_fixed_params = dict() + for conn_idx in conn_idxs: + target_type = net.connectivity[conn_idx]['target_type'] + delay = net.connectivity[conn_idx]['nc_dict']['A_delay'] + space_constant = net.connectivity[conn_idx]['nc_dict']['lamtha'] + probability = net.connectivity[conn_idx]['probability'] + delays.update({f'{target_type}': delay}) + probabilities.update({f'{target_type}': probability}) + conn_fixed_params.update({'numspikes': drive['dynamics']['numspikes']}) + conn_fixed_params.update({'location': drive['location']}) + if drive['cell_specific']: + conn_fixed_params.update({'n_drive_cells': 'n_cells'}) + else: + conn_fixed_params.update({'n_drive_cells': drive['n_drive_cells']}) + conn_fixed_params.update({'cell_specific': drive['cell_specific']}) + conn_fixed_params.update({'space_constant': space_constant}) + conn_fixed_params.update({'synaptic_delays': delays}) + conn_fixed_params.update({'probability': probabilities}) + conn_fixed_params.update({'event_seed': drive['event_seed']}) + conn_fixed_params.update({'conn_seed': drive['conn_seed']}) + fixed_params.update({drive_name: conn_fixed_params}) + return fixed_params + +def _get_predicted_params(net, **params): + """Assembles the parameters to be passed to the simulation. 
+ + Returns + ------- + predicted_params : a dictionary () + {'drive_name': {'mu': x, + 'sigma': x, + 'ampa_weights': {'L5_basket': x, + 'L2_basket': x, + 'L5_pyramidal': x, + 'L2_pyramidal': x}, + 'nmda_weights': {'L5_basket': x, + 'L2_basket': x, + 'L5_pyramidal': x, + 'L2_pyramidal': x} + } + """ + + predicted_params = dict() + for drive_name in net.external_drives.keys(): + drive_predicted_params = dict() + drive_predicted_params.update({'mu': params[f'{drive_name}_mu']}) + drive_predicted_params.update({'sigma': params[f'{drive_name}_sigma']}) + ampa_weights = dict() + nmda_weights = dict() + conn_idxs = pick_connection(net, src_gids = drive_name) + for conn_idx in conn_idxs: + target_type = net.connectivity[conn_idx]['target_type'] + target_receptor = net.connectivity[conn_idx]['receptor'] + if target_receptor == 'ampa': + ampa_weights.update({target_type: params[f'{drive_name}_ampa_{target_type}']}) + elif target_receptor == 'nmda': + nmda_weights.update({target_type: params[f'{drive_name}_nmda_{target_type}']}) + drive_predicted_params.update({'ampa_weights': ampa_weights}) + drive_predicted_params.update({'nmda_weights': nmda_weights}) + predicted_params.update({drive_name: drive_predicted_params}) + return predicted_params + +def _set_params(net, fixed_params, predicted_params): + """Sets the network parameters. + + Parameters + ---------- + net : the Network object + fixed_params : unchanging network parameters + predicted_params : the parameters predicted by the optimizer + + Returns + ------- + net : Network object + """ + + net_new = net.copy() + # remove existing drives, re-set them with updated parameters + for drive_name in net.external_drives.keys(): + # clear drive + del net_new.external_drives[drive_name] + # clear connectivity + conn_idxs = pick_connection(net_new, src_gids = drive_name) + net_new.connectivity = [conn for conn_idx, conn in enumerate(net_new.connectivity) if conn_idx not in conn_idxs] + net_new.add_evoked_drive(name = drive_name, + mu = predicted_params[drive_name]['mu'], + sigma = predicted_params[drive_name]['sigma'], + numspikes = fixed_params[drive_name]['numspikes'], + location = fixed_params[drive_name]['location'], + n_drive_cells = fixed_params[drive_name]['n_drive_cells'], + cell_specific = fixed_params[drive_name]['cell_specific'], + weights_ampa = predicted_params[drive_name]['ampa_weights'], + weights_nmda = predicted_params[drive_name]['nmda_weights'], + space_constant = fixed_params[drive_name]['space_constant'], + synaptic_delays = fixed_params[drive_name]['synaptic_delays'], + probability = fixed_params[drive_name]['probability'], + event_seed = fixed_params[drive_name]['event_seed'], + conn_seed = fixed_params[drive_name]['conn_seed']) + return net_new + +def _get_opt_net(opt_params, fixed_params): + """Returns the optimized network net_.""" + return + +# These 2 functions will go in a file called metrics.py +def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = None + """The objective function for evoked responses. 
+ + Parameters + ----------- + net : the Network object + target_dpl : the recorded dipole + params : the constraints + + Returns + ------- + rmse : normalized root mean squared error between recorded and simulated dipole + """ + + fixed_params = _get_fixed_params(net) + # get predicted params + # gp_minimize & fmin_cobyla return predicted parameters that are formatted differently (bc we are using gp_minimize w/ named args), + # we will probably need 2 different _get_predicted_params functions + predicted_params = _get_predicted_params(net, **params) + # get network with predicted params + new_net = _set_params(net, fixed_params, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop = 100, n_trials = 1)[0] + # smooth & scale + if window_len: + dpl.smooth(window_len) + scaling_factor = target_statistic.scale + # calculate error + rmse = np.sqrt( ((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times) ) / (max(target_statistic.data['agg']) - min(target_statistic.data['agg'])) + return rmse + +def _rmse_rhythmic(): + return + + + + + + + + + + + + + + + + + + + + From 3d972136c9d8cfa7806ceb96906844bca36866bd Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Fri, 16 Jun 2023 17:05:18 +0200 Subject: [PATCH 05/63] added pep8 formatting --- hnn_core/opt_toy_example/general.py | 328 +++++++++++++++------------- 1 file changed, 181 insertions(+), 147 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index c6af5dccf..7f58d59fc 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -1,16 +1,12 @@ import numpy as np -import matplotlib.pyplot as plt -from collections import namedtuple - -from hnn_core import read_params, jones_2009_model, simulate_dipole +from hnn_core import simulate_dipole from hnn_core.network import pick_connection - -from skopt import gp_minimize +from skopt import gp_minimize from skopt.utils import use_named_args from skopt.space import Real - from scipy.optimize import fmin_cobyla + class Optimizer: def __init__(self, net, constraints, solver, metric): self.net = net @@ -28,134 +24,168 @@ def __init__(self, net, constraints, solver, metric): self.net_ = None self.error_values = None self.opt_params = None - - def fit(self, target_statistic, window_len = None): + + def fit(self, target_statistic, window_len=None): """ ... Parameters ---------- target_statistic : dpl window_len : for smoothing dpl """ - - init_params, cons = self._get_params(self.net, self.constraints) - opt_params, error_values = self._run_opt(self.net, init_params, cons, self.metric, target_statistic, window_len) - + + init_params, cons = self._get_params(self.net, self.constraints) + opt_params, error_values = self._run_opt(self.net, + init_params, + cons, + self.metric, + target_statistic, + window_len) self.opt_params = opt_params self.error_values = error_values # self.net_ = _set_params(opt_params) - return - + return + def plot_convergence(): - return - + return + def plot_param_search(): - return - -# gets called only once + return + + +# gets called only once def _get_params_bayesian(net, constraints): - """Assembles constraints & initial parameters as required by gp_minimize. - + """Assembles constraints & initial parameters as required by gp_minimize. 
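+
+    Each free parameter becomes a named ``Real`` dimension whose name
+    encodes the drive, receptor, and target cell type, so that the
+    objective function can receive the predictions as named arguments.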
+ Parameters ---------- net : the Network object constraints : the user-defined constraints - - Returns + + Returns ------- init_params : list - [{drive_name}_mu, {drive_name}_sigma, {drive_name}_ampa_target, {drive_name}_nmda_target] + [drive_name_mu, + drive_name_sigma, + drive_name_ampa_target, + drive_name_nmda_target] cons : list [Real(name='{drive_name}_mu', low=x, high=x), Real(name='{drive_name}_sigma', low=x, high=x), Real(name='{drive_name}_ampa_target', low=x, high=x)] Real(name='{drive_name}_nmda_target', low=x0.01, high=x)] """ - + init_params = list() cons = list() for drive_name in net.external_drives.keys(): - for cons_key in constraints[drive_name]: + for cons_key in constraints[drive_name]: if cons_key == 'mu': - cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.external_drives[drive_name]['dynamics']['mu']) + cons.append(Real(name=f'{drive_name}_{cons_key}', + low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.external_drives[drive_name]['dynamics']['mu']) elif cons_key == 'sigma': - cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.external_drives[drive_name]['dynamics']['sigma']) + cons.append(Real(name=f'{drive_name}_{cons_key}', + low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.external_drives[drive_name]['dynamics']['sigma']) elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: target_type = net.connectivity[conn_idx]['target_type'] target_receptor = net.connectivity[conn_idx]['receptor'] - cons.append(Real(name=f'{drive_name}_{target_receptor}_{target_type}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + cons.append( + Real(name=f'{drive_name}_{target_receptor}_{target_type}', + low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.connectivity[conn_idx]['nc_dict']['A_weight']) return init_params, cons -# gets called only once + +# gets called only once def _get_params_cobyla(net, constraints): - """Assembles constraints & initial parameters as required by fmin_cobyla. - - Returns + """Assembles constraints & initial parameters as required by fmin_cobyla. 
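+
+    fmin_cobyla expects each constraint as a callable that is
+    non-negative when the constraint is satisfied, so every [min, max]
+    pair is expressed as two inequality functions.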
+ + Returns ------- init_params : list cons : list """ - + init_params = list() cons = list() - - for drive_name in net.external_drives.keys(): - for cons_idx, cons_key in enumerate(constraints[drive_name]): + + for drive_name in net.external_drives.keys(): + for cons_idx, cons_key in enumerate(constraints[drive_name]): if cons_key == 'mu' or cons_key == 'sigma': - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) - init_params.append(net.external_drives[drive_name]['dynamics'][cons_key]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][0] - x[idx]) + init_params.append( + net.external_drives[drive_name]['dynamics'][cons_key]) elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) - init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][0] - x[idx]) + init_params.append( + net.connectivity[conn_idx]['nc_dict']['A_weight']) return init_params, cons -def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, window_len): - max_iter = 11 - @use_named_args(dimensions = cons) + +def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, + window_len): + + @use_named_args(dimensions=cons) def _obj_func(**params): - return metric(net, target_statistic, window_len, **params) - - opt_results = gp_minimize(func = _obj_func, - dimensions = cons, - acq_func = 'EI', - n_calls = max_iter, - x0 = init_params, - random_state = 64) - opt_params = opt_results.x + return metric(net, target_statistic, window_len, **params) + + max_iter = 11 + opt_results = gp_minimize(func=_obj_func, + dimensions=cons, + acq_func='EI', + n_calls=max_iter, + x0=init_params, + random_state=64) + opt_params = opt_results.x # get net_ # ... - error_values = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + error_values = [np.min(opt_results.func_vals[:i]) + for i in range(1, max_iter + 1)] return opt_params, error_values -def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, window_len): - max_iter = 11 + +def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, + window_len): + def _obj_func(params_cobyla): - return metric(net = net, target_statistic = target_statistic, window_len = window_len, params_cobyla = params_cobyla) - + return metric(net=net, target_statistic=target_statistic, + window_len=window_len, params_cobyla=params_cobyla) + + max_iter = 11 opt_results = fmin_cobyla(_obj_func, - cons = cons, - rhobeg = 0.1, - rhoend = 1e-4, - x0 = init_params, - maxfun = max_iter, - catol = 0.0) + cons=cons, + rhobeg=0.1, + rhoend=1e-4, + x0=init_params, + maxfun=max_iter, + catol=0.0) opt_params = opt_results # get net_ # ... error_values = list() return opt_params, error_values - + + def _get_fixed_params(net): - """Gets fixed params (we need this function bc we have to remove and reset each drive). - + """Gets fixed params. 
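+
+    These are needed because each drive has to be removed and re-added
+    with the optimizer's predicted parameters (see ``_set_params``).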
+ Returns ------- {'drive_name': {'numspikes': x, @@ -177,11 +207,11 @@ def _get_fixed_params(net): 'event_seed': x, 'conn_seed': x}} """ - - fixed_params = dict() + + fixed_params = dict() for drive_name in net.external_drives.keys(): drive = net.external_drives[drive_name] - conn_idxs = pick_connection(net, src_gids = drive_name) + conn_idxs = pick_connection(net, src_gids=drive_name) delays = dict() probabilities = dict() conn_fixed_params = dict() @@ -191,7 +221,7 @@ def _get_fixed_params(net): space_constant = net.connectivity[conn_idx]['nc_dict']['lamtha'] probability = net.connectivity[conn_idx]['probability'] delays.update({f'{target_type}': delay}) - probabilities.update({f'{target_type}': probability}) + probabilities.update({f'{target_type}': probability}) conn_fixed_params.update({'numspikes': drive['dynamics']['numspikes']}) conn_fixed_params.update({'location': drive['location']}) if drive['cell_specific']: @@ -207,9 +237,10 @@ def _get_fixed_params(net): fixed_params.update({drive_name: conn_fixed_params}) return fixed_params + def _get_predicted_params(net, **params): """Assembles the parameters to be passed to the simulation. - + Returns ------- predicted_params : a dictionary () @@ -225,7 +256,7 @@ def _get_predicted_params(net, **params): 'L2_pyramidal': x} } """ - + predicted_params = dict() for drive_name in net.external_drives.keys(): drive_predicted_params = dict() @@ -233,112 +264,115 @@ def _get_predicted_params(net, **params): drive_predicted_params.update({'sigma': params[f'{drive_name}_sigma']}) ampa_weights = dict() nmda_weights = dict() - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: target_type = net.connectivity[conn_idx]['target_type'] target_receptor = net.connectivity[conn_idx]['receptor'] if target_receptor == 'ampa': - ampa_weights.update({target_type: params[f'{drive_name}_ampa_{target_type}']}) + ampa_weights.update( + {target_type: params[f'{drive_name}_ampa_{target_type}']}) elif target_receptor == 'nmda': - nmda_weights.update({target_type: params[f'{drive_name}_nmda_{target_type}']}) - drive_predicted_params.update({'ampa_weights': ampa_weights}) - drive_predicted_params.update({'nmda_weights': nmda_weights}) + nmda_weights.update( + {target_type: params[f'{drive_name}_nmda_{target_type}']}) + drive_predicted_params.update({'ampa_weights': ampa_weights}) + drive_predicted_params.update({'nmda_weights': nmda_weights}) predicted_params.update({drive_name: drive_predicted_params}) return predicted_params + def _set_params(net, fixed_params, predicted_params): """Sets the network parameters. 
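+
+    Operates on a copy of the network: every drive and its connectivity
+    are cleared, then re-added with the fixed and the predicted
+    (optimized) parameters combined.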
- + Parameters ---------- - net : the Network object - fixed_params : unchanging network parameters + net : the Network object + fixed_params : unchanging network parameters predicted_params : the parameters predicted by the optimizer - + Returns ------- net : Network object """ - + net_new = net.copy() - # remove existing drives, re-set them with updated parameters + # remove existing drives, re-set them with updated parameters for drive_name in net.external_drives.keys(): - # clear drive - del net_new.external_drives[drive_name] + # clear drive + del net_new.external_drives[drive_name] # clear connectivity - conn_idxs = pick_connection(net_new, src_gids = drive_name) - net_new.connectivity = [conn for conn_idx, conn in enumerate(net_new.connectivity) if conn_idx not in conn_idxs] - net_new.add_evoked_drive(name = drive_name, - mu = predicted_params[drive_name]['mu'], - sigma = predicted_params[drive_name]['sigma'], - numspikes = fixed_params[drive_name]['numspikes'], - location = fixed_params[drive_name]['location'], - n_drive_cells = fixed_params[drive_name]['n_drive_cells'], - cell_specific = fixed_params[drive_name]['cell_specific'], - weights_ampa = predicted_params[drive_name]['ampa_weights'], - weights_nmda = predicted_params[drive_name]['nmda_weights'], - space_constant = fixed_params[drive_name]['space_constant'], - synaptic_delays = fixed_params[drive_name]['synaptic_delays'], - probability = fixed_params[drive_name]['probability'], - event_seed = fixed_params[drive_name]['event_seed'], - conn_seed = fixed_params[drive_name]['conn_seed']) + conn_idxs = pick_connection(net_new, src_gids=drive_name) + net_new.connectivity = [conn for conn_idx, conn + in enumerate(net_new.connectivity) + if conn_idx not in conn_idxs] + net_new.add_evoked_drive(name=drive_name, + mu=predicted_params[drive_name]['mu'], + sigma=predicted_params[drive_name]['sigma'], + numspikes=fixed_params[drive_name] + ['numspikes'], + location=fixed_params[drive_name]['location'], + n_drive_cells=fixed_params[drive_name] + ['n_drive_cells'], + cell_specific=fixed_params[drive_name] + ['cell_specific'], + weights_ampa=predicted_params[drive_name] + ['ampa_weights'], + weights_nmda=predicted_params[drive_name] + ['nmda_weights'], + space_constant=fixed_params[drive_name] + ['space_constant'], + synaptic_delays=fixed_params[drive_name] + ['synaptic_delays'], + probability=fixed_params[drive_name] + ['probability'], + event_seed=fixed_params[drive_name] + ['event_seed'], + conn_seed=fixed_params[drive_name] + ['conn_seed']) return net_new + def _get_opt_net(opt_params, fixed_params): """Returns the optimized network net_.""" - return + return + # These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = None +def _rmse_evoked(net, target_statistic, window_len, **params): + # params_cobyla = None """The objective function for evoked responses. 
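+
+    The simulated dipole is smoothed and scaled to match the processing
+    applied to the target statistic before the error is computed.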
- + Parameters ----------- - net : the Network object + net : the Network object target_dpl : the recorded dipole - params : the constraints - + params : the constraints + Returns ------- - rmse : normalized root mean squared error between recorded and simulated dipole + rmse : normalized RMSE between recorded and simulated dipole """ fixed_params = _get_fixed_params(net) # get predicted params - # gp_minimize & fmin_cobyla return predicted parameters that are formatted differently (bc we are using gp_minimize w/ named args), - # we will probably need 2 different _get_predicted_params functions - predicted_params = _get_predicted_params(net, **params) - # get network with predicted params + # gp_minimize & fmin_cobyla return params in different formats + # (bc we are using gp_minimize w/ named args), + # we will probably need 2 different _get_predicted_params functions + predicted_params = _get_predicted_params(net, **params) + # get network with predicted params new_net = _set_params(net, fixed_params, predicted_params) # simulate dipole - dpl = simulate_dipole(new_net, tstop = 100, n_trials = 1)[0] + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] # smooth & scale if window_len: dpl.smooth(window_len) scaling_factor = target_statistic.scale + dpl.scale(scaling_factor) # calculate error - rmse = np.sqrt( ((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times) ) / (max(target_statistic.data['agg']) - min(target_statistic.data['agg'])) + rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() + / len(dpl.times)) / (max(target_statistic.data['agg']) + - min(target_statistic.data['agg'])) return rmse -def _rmse_rhythmic(): - return - - - - - - - - - - - - - - - - - - - +def _rmse_rhythmic(): + return From 8f3dfef26633f4de22f20f8fadf787827910b8de Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 15 Jun 2023 18:24:57 +0200 Subject: [PATCH 06/63] Draft opt class and functions based on comments --- hnn_core/opt_toy_example/general.py | 266 ++++++++++++++++++++++++++++ 1 file changed, 266 insertions(+) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 7f58d59fc..46b4207fd 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -1,4 +1,5 @@ import numpy as np +<<<<<<< HEAD from hnn_core import simulate_dipole from hnn_core.network import pick_connection from skopt import gp_minimize @@ -6,6 +7,19 @@ from skopt.space import Real from scipy.optimize import fmin_cobyla +======= +import matplotlib.pyplot as plt +from collections import namedtuple + +from hnn_core import read_params, jones_2009_model, simulate_dipole +from hnn_core.network import pick_connection + +from skopt import gp_minimize +from skopt.utils import use_named_args +from skopt.space import Real + +from scipy.optimize import fmin_cobyla +>>>>>>> df86a5d (Draft opt class and functions based on comments) class Optimizer: def __init__(self, net, constraints, solver, metric): @@ -24,14 +38,20 @@ def __init__(self, net, constraints, solver, metric): self.net_ = None self.error_values = None self.opt_params = None +<<<<<<< HEAD def fit(self, target_statistic, window_len=None): +======= + + def fit(self, target_statistic, window_len = None): +>>>>>>> df86a5d (Draft opt class and functions based on comments) """ ... 
Parameters ---------- target_statistic : dpl window_len : for smoothing dpl """ +<<<<<<< HEAD init_params, cons = self._get_params(self.net, self.constraints) opt_params, error_values = self._run_opt(self.net, @@ -56,10 +76,32 @@ def plot_param_search(): def _get_params_bayesian(net, constraints): """Assembles constraints & initial parameters as required by gp_minimize. +======= + + init_params, cons = self._get_params(self.net, self.constraints) + opt_params, error_values = self._run_opt(self.net, init_params, cons, self.metric, target_statistic, window_len) + + self.opt_params = opt_params + self.error_values = error_values + # self.net_ = _set_params(opt_params) + return + + def plot_convergence(): + return + + def plot_param_search(): + return + +# gets called only once +def _get_params_bayesian(net, constraints): + """Assembles constraints & initial parameters as required by gp_minimize. + +>>>>>>> df86a5d (Draft opt class and functions based on comments) Parameters ---------- net : the Network object constraints : the user-defined constraints +<<<<<<< HEAD Returns ------- @@ -68,12 +110,20 @@ def _get_params_bayesian(net, constraints): drive_name_sigma, drive_name_ampa_target, drive_name_nmda_target] +======= + + Returns + ------- + init_params : list + [{drive_name}_mu, {drive_name}_sigma, {drive_name}_ampa_target, {drive_name}_nmda_target] +>>>>>>> df86a5d (Draft opt class and functions based on comments) cons : list [Real(name='{drive_name}_mu', low=x, high=x), Real(name='{drive_name}_sigma', low=x, high=x), Real(name='{drive_name}_ampa_target', low=x, high=x)] Real(name='{drive_name}_nmda_target', low=x0.01, high=x)] """ +<<<<<<< HEAD init_params = list() cons = list() @@ -110,10 +160,38 @@ def _get_params_cobyla(net, constraints): """Assembles constraints & initial parameters as required by fmin_cobyla. Returns +======= + + init_params = list() + cons = list() + for drive_name in net.external_drives.keys(): + for cons_key in constraints[drive_name]: + if cons_key == 'mu': + cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) + init_params.append(net.external_drives[drive_name]['dynamics']['mu']) + elif cons_key == 'sigma': + cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) + init_params.append(net.external_drives[drive_name]['dynamics']['sigma']) + elif cons_key == 'weights': + conn_idxs = pick_connection(net, src_gids = drive_name) + for conn_idx in conn_idxs: + target_type = net.connectivity[conn_idx]['target_type'] + target_receptor = net.connectivity[conn_idx]['receptor'] + cons.append(Real(name=f'{drive_name}_{target_receptor}_{target_type}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) + init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + return init_params, cons + +# gets called only once +def _get_params_cobyla(net, constraints): + """Assembles constraints & initial parameters as required by fmin_cobyla. 
+ + Returns +>>>>>>> df86a5d (Draft opt class and functions based on comments) ------- init_params : list cons : list """ +<<<<<<< HEAD init_params = list() cons = list() @@ -176,16 +254,73 @@ def _obj_func(params_cobyla): x0=init_params, maxfun=max_iter, catol=0.0) +======= + + init_params = list() + cons = list() + + for drive_name in net.external_drives.keys(): + for cons_idx, cons_key in enumerate(constraints[drive_name]): + if cons_key == 'mu' or cons_key == 'sigma': + cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) + init_params.append(net.external_drives[drive_name]['dynamics'][cons_key]) + elif cons_key == 'weights': + conn_idxs = pick_connection(net, src_gids = drive_name) + for conn_idx in conn_idxs: + cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) + init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + return init_params, cons + +def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, window_len): + max_iter = 11 + @use_named_args(dimensions = cons) + def _obj_func(**params): + return metric(net, target_statistic, window_len, **params) + + opt_results = gp_minimize(func = _obj_func, + dimensions = cons, + acq_func = 'EI', + n_calls = max_iter, + x0 = init_params, + random_state = 64) + opt_params = opt_results.x + # get net_ + # ... + error_values = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + return opt_params, error_values + +def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, window_len): + max_iter = 11 + def _obj_func(params_cobyla): + return metric(net = net, target_statistic = target_statistic, window_len = window_len, params_cobyla = params_cobyla) + + opt_results = fmin_cobyla(_obj_func, + cons = cons, + rhobeg = 0.1, + rhoend = 1e-4, + x0 = init_params, + maxfun = max_iter, + catol = 0.0) +>>>>>>> df86a5d (Draft opt class and functions based on comments) opt_params = opt_results # get net_ # ... error_values = list() return opt_params, error_values +<<<<<<< HEAD def _get_fixed_params(net): """Gets fixed params. +======= + +def _get_fixed_params(net): + """Gets fixed params (we need this function bc we have to remove and reset each drive). 
+ +>>>>>>> df86a5d (Draft opt class and functions based on comments) Returns ------- {'drive_name': {'numspikes': x, @@ -207,11 +342,19 @@ def _get_fixed_params(net): 'event_seed': x, 'conn_seed': x}} """ +<<<<<<< HEAD fixed_params = dict() for drive_name in net.external_drives.keys(): drive = net.external_drives[drive_name] conn_idxs = pick_connection(net, src_gids=drive_name) +======= + + fixed_params = dict() + for drive_name in net.external_drives.keys(): + drive = net.external_drives[drive_name] + conn_idxs = pick_connection(net, src_gids = drive_name) +>>>>>>> df86a5d (Draft opt class and functions based on comments) delays = dict() probabilities = dict() conn_fixed_params = dict() @@ -221,7 +364,11 @@ def _get_fixed_params(net): space_constant = net.connectivity[conn_idx]['nc_dict']['lamtha'] probability = net.connectivity[conn_idx]['probability'] delays.update({f'{target_type}': delay}) +<<<<<<< HEAD probabilities.update({f'{target_type}': probability}) +======= + probabilities.update({f'{target_type}': probability}) +>>>>>>> df86a5d (Draft opt class and functions based on comments) conn_fixed_params.update({'numspikes': drive['dynamics']['numspikes']}) conn_fixed_params.update({'location': drive['location']}) if drive['cell_specific']: @@ -237,10 +384,16 @@ def _get_fixed_params(net): fixed_params.update({drive_name: conn_fixed_params}) return fixed_params +<<<<<<< HEAD def _get_predicted_params(net, **params): """Assembles the parameters to be passed to the simulation. +======= +def _get_predicted_params(net, **params): + """Assembles the parameters to be passed to the simulation. + +>>>>>>> df86a5d (Draft opt class and functions based on comments) Returns ------- predicted_params : a dictionary () @@ -256,7 +409,11 @@ def _get_predicted_params(net, **params): 'L2_pyramidal': x} } """ +<<<<<<< HEAD +======= + +>>>>>>> df86a5d (Draft opt class and functions based on comments) predicted_params = dict() for drive_name in net.external_drives.keys(): drive_predicted_params = dict() @@ -264,6 +421,7 @@ def _get_predicted_params(net, **params): drive_predicted_params.update({'sigma': params[f'{drive_name}_sigma']}) ampa_weights = dict() nmda_weights = dict() +<<<<<<< HEAD conn_idxs = pick_connection(net, src_gids=drive_name) for conn_idx in conn_idxs: target_type = net.connectivity[conn_idx]['target_type'] @@ -289,10 +447,35 @@ def _set_params(net, fixed_params, predicted_params): fixed_params : unchanging network parameters predicted_params : the parameters predicted by the optimizer +======= + conn_idxs = pick_connection(net, src_gids = drive_name) + for conn_idx in conn_idxs: + target_type = net.connectivity[conn_idx]['target_type'] + target_receptor = net.connectivity[conn_idx]['receptor'] + if target_receptor == 'ampa': + ampa_weights.update({target_type: params[f'{drive_name}_ampa_{target_type}']}) + elif target_receptor == 'nmda': + nmda_weights.update({target_type: params[f'{drive_name}_nmda_{target_type}']}) + drive_predicted_params.update({'ampa_weights': ampa_weights}) + drive_predicted_params.update({'nmda_weights': nmda_weights}) + predicted_params.update({drive_name: drive_predicted_params}) + return predicted_params + +def _set_params(net, fixed_params, predicted_params): + """Sets the network parameters. 
+ + Parameters + ---------- + net : the Network object + fixed_params : unchanging network parameters + predicted_params : the parameters predicted by the optimizer + +>>>>>>> df86a5d (Draft opt class and functions based on comments) Returns ------- net : Network object """ +<<<<<<< HEAD net_new = net.copy() # remove existing drives, re-set them with updated parameters @@ -350,10 +533,55 @@ def _rmse_evoked(net, target_statistic, window_len, **params): Returns ------- rmse : normalized RMSE between recorded and simulated dipole +======= + + net_new = net.copy() + # remove existing drives, re-set them with updated parameters + for drive_name in net.external_drives.keys(): + # clear drive + del net_new.external_drives[drive_name] + # clear connectivity + conn_idxs = pick_connection(net_new, src_gids = drive_name) + net_new.connectivity = [conn for conn_idx, conn in enumerate(net_new.connectivity) if conn_idx not in conn_idxs] + net_new.add_evoked_drive(name = drive_name, + mu = predicted_params[drive_name]['mu'], + sigma = predicted_params[drive_name]['sigma'], + numspikes = fixed_params[drive_name]['numspikes'], + location = fixed_params[drive_name]['location'], + n_drive_cells = fixed_params[drive_name]['n_drive_cells'], + cell_specific = fixed_params[drive_name]['cell_specific'], + weights_ampa = predicted_params[drive_name]['ampa_weights'], + weights_nmda = predicted_params[drive_name]['nmda_weights'], + space_constant = fixed_params[drive_name]['space_constant'], + synaptic_delays = fixed_params[drive_name]['synaptic_delays'], + probability = fixed_params[drive_name]['probability'], + event_seed = fixed_params[drive_name]['event_seed'], + conn_seed = fixed_params[drive_name]['conn_seed']) + return net_new + +def _get_opt_net(opt_params, fixed_params): + """Returns the optimized network net_.""" + return + +# These 2 functions will go in a file called metrics.py +def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = None + """The objective function for evoked responses. 
+ + Parameters + ----------- + net : the Network object + target_dpl : the recorded dipole + params : the constraints + + Returns + ------- + rmse : normalized root mean squared error between recorded and simulated dipole +>>>>>>> df86a5d (Draft opt class and functions based on comments) """ fixed_params = _get_fixed_params(net) # get predicted params +<<<<<<< HEAD # gp_minimize & fmin_cobyla return params in different formats # (bc we are using gp_minimize w/ named args), # we will probably need 2 different _get_predicted_params functions @@ -362,10 +590,20 @@ def _rmse_evoked(net, target_statistic, window_len, **params): new_net = _set_params(net, fixed_params, predicted_params) # simulate dipole dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] +======= + # gp_minimize & fmin_cobyla return predicted parameters that are formatted differently (bc we are using gp_minimize w/ named args), + # we will probably need 2 different _get_predicted_params functions + predicted_params = _get_predicted_params(net, **params) + # get network with predicted params + new_net = _set_params(net, fixed_params, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop = 100, n_trials = 1)[0] +>>>>>>> df86a5d (Draft opt class and functions based on comments) # smooth & scale if window_len: dpl.smooth(window_len) scaling_factor = target_statistic.scale +<<<<<<< HEAD dpl.scale(scaling_factor) # calculate error rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() @@ -376,3 +614,31 @@ def _rmse_evoked(net, target_statistic, window_len, **params): def _rmse_rhythmic(): return +======= + # calculate error + rmse = np.sqrt( ((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times) ) / (max(target_statistic.data['agg']) - min(target_statistic.data['agg'])) + return rmse + +def _rmse_rhythmic(): + return + + + + + + + + + + + + + + + + + + + + +>>>>>>> df86a5d (Draft opt class and functions based on comments) From c78406bb32b3de2d858ba28ae0809221ed178c41 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Fri, 16 Jun 2023 17:05:18 +0200 Subject: [PATCH 07/63] added pep8 formatting --- hnn_core/opt_toy_example/general.py | 348 ++++++++++++++++++++-------- 1 file changed, 246 insertions(+), 102 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 46b4207fd..53b8505f1 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -1,5 +1,6 @@ import numpy as np <<<<<<< HEAD +<<<<<<< HEAD from hnn_core import simulate_dipole from hnn_core.network import pick_connection from skopt import gp_minimize @@ -12,15 +13,17 @@ from collections import namedtuple from hnn_core import read_params, jones_2009_model, simulate_dipole +======= +from hnn_core import simulate_dipole +>>>>>>> 2f308f8 (added pep8 formatting) from hnn_core.network import pick_connection - -from skopt import gp_minimize +from skopt import gp_minimize from skopt.utils import use_named_args from skopt.space import Real - from scipy.optimize import fmin_cobyla >>>>>>> df86a5d (Draft opt class and functions based on comments) + class Optimizer: def __init__(self, net, constraints, solver, metric): self.net = net @@ -38,6 +41,7 @@ def __init__(self, net, constraints, solver, metric): self.net_ = None self.error_values = None self.opt_params = None +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic, window_len=None): @@ -45,6 +49,10 @@ def fit(self, target_statistic, window_len=None): def fit(self, 
target_statistic, window_len = None): >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + + def fit(self, target_statistic, window_len=None): +>>>>>>> 2f308f8 (added pep8 formatting) """ ... Parameters ---------- @@ -52,6 +60,9 @@ def fit(self, target_statistic, window_len = None): window_len : for smoothing dpl """ <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 2f308f8 (added pep8 formatting) init_params, cons = self._get_params(self.net, self.constraints) opt_params, error_values = self._run_opt(self.net, @@ -60,6 +71,7 @@ def fit(self, target_statistic, window_len = None): self.metric, target_statistic, window_len) +<<<<<<< HEAD self.opt_params = opt_params self.error_values = error_values # self.net_ = _set_params(opt_params) @@ -81,26 +93,35 @@ def _get_params_bayesian(net, constraints): init_params, cons = self._get_params(self.net, self.constraints) opt_params, error_values = self._run_opt(self.net, init_params, cons, self.metric, target_statistic, window_len) +======= +>>>>>>> 2f308f8 (added pep8 formatting) self.opt_params = opt_params self.error_values = error_values # self.net_ = _set_params(opt_params) - return - + return + def plot_convergence(): - return - + return + def plot_param_search(): - return - -# gets called only once + return + + +# gets called only once def _get_params_bayesian(net, constraints): +<<<<<<< HEAD """Assembles constraints & initial parameters as required by gp_minimize. >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + """Assembles constraints & initial parameters as required by gp_minimize. + +>>>>>>> 2f308f8 (added pep8 formatting) Parameters ---------- net : the Network object constraints : the user-defined constraints +<<<<<<< HEAD <<<<<<< HEAD Returns @@ -117,12 +138,23 @@ def _get_params_bayesian(net, constraints): init_params : list [{drive_name}_mu, {drive_name}_sigma, {drive_name}_ampa_target, {drive_name}_nmda_target] >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + + Returns + ------- + init_params : list + [drive_name_mu, + drive_name_sigma, + drive_name_ampa_target, + drive_name_nmda_target] +>>>>>>> 2f308f8 (added pep8 formatting) cons : list [Real(name='{drive_name}_mu', low=x, high=x), Real(name='{drive_name}_sigma', low=x, high=x), Real(name='{drive_name}_ampa_target', low=x, high=x)] Real(name='{drive_name}_nmda_target', low=x0.01, high=x)] """ +<<<<<<< HEAD <<<<<<< HEAD init_params = list() @@ -162,35 +194,56 @@ def _get_params_cobyla(net, constraints): Returns ======= +======= + +>>>>>>> 2f308f8 (added pep8 formatting) init_params = list() cons = list() for drive_name in net.external_drives.keys(): - for cons_key in constraints[drive_name]: + for cons_key in constraints[drive_name]: if cons_key == 'mu': - cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.external_drives[drive_name]['dynamics']['mu']) + cons.append(Real(name=f'{drive_name}_{cons_key}', + low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.external_drives[drive_name]['dynamics']['mu']) elif cons_key == 'sigma': - cons.append(Real(name=f'{drive_name}_{cons_key}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.external_drives[drive_name]['dynamics']['sigma']) + cons.append(Real(name=f'{drive_name}_{cons_key}', + 
low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.external_drives[drive_name]['dynamics']['sigma']) elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: target_type = net.connectivity[conn_idx]['target_type'] target_receptor = net.connectivity[conn_idx]['receptor'] - cons.append(Real(name=f'{drive_name}_{target_receptor}_{target_type}', low = constraints[drive_name][cons_key][0], high = constraints[drive_name][cons_key][1])) - init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + cons.append( + Real(name=f'{drive_name}_{target_receptor}_{target_type}', + low=constraints[drive_name][cons_key][0], + high=constraints[drive_name][cons_key][1])) + init_params.append( + net.connectivity[conn_idx]['nc_dict']['A_weight']) return init_params, cons -# gets called only once + +# gets called only once def _get_params_cobyla(net, constraints): +<<<<<<< HEAD """Assembles constraints & initial parameters as required by fmin_cobyla. Returns >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + """Assembles constraints & initial parameters as required by fmin_cobyla. + + Returns +>>>>>>> 2f308f8 (added pep8 formatting) ------- init_params : list cons : list """ +<<<<<<< HEAD <<<<<<< HEAD init_params = list() @@ -256,47 +309,65 @@ def _obj_func(params_cobyla): catol=0.0) ======= +======= + +>>>>>>> 2f308f8 (added pep8 formatting) init_params = list() cons = list() - - for drive_name in net.external_drives.keys(): - for cons_idx, cons_key in enumerate(constraints[drive_name]): + + for drive_name in net.external_drives.keys(): + for cons_idx, cons_key in enumerate(constraints[drive_name]): if cons_key == 'mu' or cons_key == 'sigma': - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) - init_params.append(net.external_drives[drive_name]['dynamics'][cons_key]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][0] - x[idx]) + init_params.append( + net.external_drives[drive_name]['dynamics'][cons_key]) elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx = cons_idx: constraints[drive_name][cons_key][0] - x[idx]) - init_params.append(net.connectivity[conn_idx]['nc_dict']['A_weight']) + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][1] - x[idx]) + cons.append(lambda x, idx=cons_idx: + constraints[drive_name][cons_key][0] - x[idx]) + init_params.append( + net.connectivity[conn_idx]['nc_dict']['A_weight']) return init_params, cons -def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, window_len): - max_iter = 11 - @use_named_args(dimensions = cons) + +def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, + window_len): + + @use_named_args(dimensions=cons) def _obj_func(**params): - return metric(net, target_statistic, window_len, **params) - - opt_results = gp_minimize(func = _obj_func, - dimensions = cons, - acq_func = 
'EI', - n_calls = max_iter, - x0 = init_params, - random_state = 64) - opt_params = opt_results.x + return metric(net, target_statistic, window_len, **params) + + max_iter = 11 + opt_results = gp_minimize(func=_obj_func, + dimensions=cons, + acq_func='EI', + n_calls=max_iter, + x0=init_params, + random_state=64) + opt_params = opt_results.x # get net_ # ... - error_values = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + error_values = [np.min(opt_results.func_vals[:i]) + for i in range(1, max_iter + 1)] return opt_params, error_values -def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, window_len): - max_iter = 11 + +def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, + window_len): + def _obj_func(params_cobyla): - return metric(net = net, target_statistic = target_statistic, window_len = window_len, params_cobyla = params_cobyla) - + return metric(net=net, target_statistic=target_statistic, + window_len=window_len, params_cobyla=params_cobyla) + + max_iter = 11 opt_results = fmin_cobyla(_obj_func, +<<<<<<< HEAD cons = cons, rhobeg = 0.1, rhoend = 1e-4, @@ -304,12 +375,21 @@ def _obj_func(params_cobyla): maxfun = max_iter, catol = 0.0) >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + cons=cons, + rhobeg=0.1, + rhoend=1e-4, + x0=init_params, + maxfun=max_iter, + catol=0.0) +>>>>>>> 2f308f8 (added pep8 formatting) opt_params = opt_results # get net_ # ... error_values = list() return opt_params, error_values <<<<<<< HEAD +<<<<<<< HEAD def _get_fixed_params(net): @@ -321,6 +401,13 @@ def _get_fixed_params(net): """Gets fixed params (we need this function bc we have to remove and reset each drive). >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + + +def _get_fixed_params(net): + """Gets fixed params. + +>>>>>>> 2f308f8 (added pep8 formatting) Returns ------- {'drive_name': {'numspikes': x, @@ -342,6 +429,7 @@ def _get_fixed_params(net): 'event_seed': x, 'conn_seed': x}} """ +<<<<<<< HEAD <<<<<<< HEAD fixed_params = dict() @@ -355,6 +443,13 @@ def _get_fixed_params(net): drive = net.external_drives[drive_name] conn_idxs = pick_connection(net, src_gids = drive_name) >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + + fixed_params = dict() + for drive_name in net.external_drives.keys(): + drive = net.external_drives[drive_name] + conn_idxs = pick_connection(net, src_gids=drive_name) +>>>>>>> 2f308f8 (added pep8 formatting) delays = dict() probabilities = dict() conn_fixed_params = dict() @@ -364,11 +459,15 @@ def _get_fixed_params(net): space_constant = net.connectivity[conn_idx]['nc_dict']['lamtha'] probability = net.connectivity[conn_idx]['probability'] delays.update({f'{target_type}': delay}) +<<<<<<< HEAD <<<<<<< HEAD probabilities.update({f'{target_type}': probability}) ======= probabilities.update({f'{target_type}': probability}) >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + probabilities.update({f'{target_type}': probability}) +>>>>>>> 2f308f8 (added pep8 formatting) conn_fixed_params.update({'numspikes': drive['dynamics']['numspikes']}) conn_fixed_params.update({'location': drive['location']}) if drive['cell_specific']: @@ -385,6 +484,7 @@ def _get_fixed_params(net): return fixed_params <<<<<<< HEAD +<<<<<<< HEAD def _get_predicted_params(net, **params): """Assembles the parameters to be passed to the simulation. 
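+
+    Only the parameters being optimized (dynamics and synaptic weights)
+    are assembled here; the remaining drive parameters come from
+    ``_get_fixed_params``.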
@@ -394,6 +494,12 @@ def _get_predicted_params(net, **params): """Assembles the parameters to be passed to the simulation. >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + +def _get_predicted_params(net, **params): + """Assembles the parameters to be passed to the simulation. + +>>>>>>> 2f308f8 (added pep8 formatting) Returns ------- predicted_params : a dictionary () @@ -410,10 +516,14 @@ def _get_predicted_params(net, **params): } """ <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + +>>>>>>> 2f308f8 (added pep8 formatting) predicted_params = dict() for drive_name in net.external_drives.keys(): drive_predicted_params = dict() @@ -421,6 +531,7 @@ def _get_predicted_params(net, **params): drive_predicted_params.update({'sigma': params[f'{drive_name}_sigma']}) ampa_weights = dict() nmda_weights = dict() +<<<<<<< HEAD <<<<<<< HEAD conn_idxs = pick_connection(net, src_gids=drive_name) for conn_idx in conn_idxs: @@ -450,31 +561,43 @@ def _set_params(net, fixed_params, predicted_params): ======= conn_idxs = pick_connection(net, src_gids = drive_name) for conn_idx in conn_idxs: +======= + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: +>>>>>>> 2f308f8 (added pep8 formatting) target_type = net.connectivity[conn_idx]['target_type'] target_receptor = net.connectivity[conn_idx]['receptor'] if target_receptor == 'ampa': - ampa_weights.update({target_type: params[f'{drive_name}_ampa_{target_type}']}) + ampa_weights.update( + {target_type: params[f'{drive_name}_ampa_{target_type}']}) elif target_receptor == 'nmda': - nmda_weights.update({target_type: params[f'{drive_name}_nmda_{target_type}']}) - drive_predicted_params.update({'ampa_weights': ampa_weights}) - drive_predicted_params.update({'nmda_weights': nmda_weights}) + nmda_weights.update( + {target_type: params[f'{drive_name}_nmda_{target_type}']}) + drive_predicted_params.update({'ampa_weights': ampa_weights}) + drive_predicted_params.update({'nmda_weights': nmda_weights}) predicted_params.update({drive_name: drive_predicted_params}) return predicted_params + def _set_params(net, fixed_params, predicted_params): """Sets the network parameters. 
- + Parameters ---------- - net : the Network object - fixed_params : unchanging network parameters + net : the Network object + fixed_params : unchanging network parameters predicted_params : the parameters predicted by the optimizer +<<<<<<< HEAD >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + +>>>>>>> 2f308f8 (added pep8 formatting) Returns ------- net : Network object """ +<<<<<<< HEAD <<<<<<< HEAD net_new = net.copy() @@ -535,58 +658,84 @@ def _rmse_evoked(net, target_statistic, window_len, **params): rmse : normalized RMSE between recorded and simulated dipole ======= +======= + +>>>>>>> 2f308f8 (added pep8 formatting) net_new = net.copy() - # remove existing drives, re-set them with updated parameters + # remove existing drives, re-set them with updated parameters for drive_name in net.external_drives.keys(): - # clear drive - del net_new.external_drives[drive_name] + # clear drive + del net_new.external_drives[drive_name] # clear connectivity - conn_idxs = pick_connection(net_new, src_gids = drive_name) - net_new.connectivity = [conn for conn_idx, conn in enumerate(net_new.connectivity) if conn_idx not in conn_idxs] - net_new.add_evoked_drive(name = drive_name, - mu = predicted_params[drive_name]['mu'], - sigma = predicted_params[drive_name]['sigma'], - numspikes = fixed_params[drive_name]['numspikes'], - location = fixed_params[drive_name]['location'], - n_drive_cells = fixed_params[drive_name]['n_drive_cells'], - cell_specific = fixed_params[drive_name]['cell_specific'], - weights_ampa = predicted_params[drive_name]['ampa_weights'], - weights_nmda = predicted_params[drive_name]['nmda_weights'], - space_constant = fixed_params[drive_name]['space_constant'], - synaptic_delays = fixed_params[drive_name]['synaptic_delays'], - probability = fixed_params[drive_name]['probability'], - event_seed = fixed_params[drive_name]['event_seed'], - conn_seed = fixed_params[drive_name]['conn_seed']) + conn_idxs = pick_connection(net_new, src_gids=drive_name) + net_new.connectivity = [conn for conn_idx, conn + in enumerate(net_new.connectivity) + if conn_idx not in conn_idxs] + net_new.add_evoked_drive(name=drive_name, + mu=predicted_params[drive_name]['mu'], + sigma=predicted_params[drive_name]['sigma'], + numspikes=fixed_params[drive_name] + ['numspikes'], + location=fixed_params[drive_name]['location'], + n_drive_cells=fixed_params[drive_name] + ['n_drive_cells'], + cell_specific=fixed_params[drive_name] + ['cell_specific'], + weights_ampa=predicted_params[drive_name] + ['ampa_weights'], + weights_nmda=predicted_params[drive_name] + ['nmda_weights'], + space_constant=fixed_params[drive_name] + ['space_constant'], + synaptic_delays=fixed_params[drive_name] + ['synaptic_delays'], + probability=fixed_params[drive_name] + ['probability'], + event_seed=fixed_params[drive_name] + ['event_seed'], + conn_seed=fixed_params[drive_name] + ['conn_seed']) return net_new + def _get_opt_net(opt_params, fixed_params): """Returns the optimized network net_.""" - return + return + # These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = None +def _rmse_evoked(net, target_statistic, window_len, **params): + # params_cobyla = None """The objective function for evoked responses. 
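+
+    Lower is better; a value of 0 means the simulated dipole reproduces
+    the target exactly.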
- + Parameters ----------- - net : the Network object + net : the Network object target_dpl : the recorded dipole - params : the constraints - + params : the constraints + Returns ------- +<<<<<<< HEAD rmse : normalized root mean squared error between recorded and simulated dipole >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + rmse : normalized RMSE between recorded and simulated dipole +>>>>>>> 2f308f8 (added pep8 formatting) """ fixed_params = _get_fixed_params(net) # get predicted params <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 2f308f8 (added pep8 formatting) # gp_minimize & fmin_cobyla return params in different formats # (bc we are using gp_minimize w/ named args), # we will probably need 2 different _get_predicted_params functions predicted_params = _get_predicted_params(net, **params) # get network with predicted params +<<<<<<< HEAD new_net = _set_params(net, fixed_params, predicted_params) # simulate dipole dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] @@ -599,10 +748,16 @@ def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = # simulate dipole dpl = simulate_dipole(new_net, tstop = 100, n_trials = 1)[0] >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= + new_net = _set_params(net, fixed_params, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] +>>>>>>> 2f308f8 (added pep8 formatting) # smooth & scale if window_len: dpl.smooth(window_len) scaling_factor = target_statistic.scale +<<<<<<< HEAD <<<<<<< HEAD dpl.scale(scaling_factor) # calculate error @@ -615,30 +770,19 @@ def _rmse_evoked(net, target_statistic, window_len, **params): # params_cobyla = def _rmse_rhythmic(): return ======= +======= + dpl.scale(scaling_factor) +>>>>>>> 2f308f8 (added pep8 formatting) # calculate error - rmse = np.sqrt( ((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times) ) / (max(target_statistic.data['agg']) - min(target_statistic.data['agg'])) + rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() + / len(dpl.times)) / (max(target_statistic.data['agg']) + - min(target_statistic.data['agg'])) return rmse -def _rmse_rhythmic(): - return - - - - - - - - - - - - - - - - - - - +<<<<<<< HEAD >>>>>>> df86a5d (Draft opt class and functions based on comments) +======= +def _rmse_rhythmic(): + return +>>>>>>> 2f308f8 (added pep8 formatting) From 69503bca974aa5dd8a94f6ce81ea9ba7764f1e91 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 22 Jun 2023 13:42:40 +0200 Subject: [PATCH 08/63] Address comments for more generalized routine --- hnn_core/opt_toy_example/general.py | 467 ++++++++++++++++++++++------ 1 file changed, 364 insertions(+), 103 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 53b8505f1..a2c412efd 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -1,11 +1,14 @@ import numpy as np <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +======= + +>>>>>>> 672ce00 (Address comments for more generalized routine) from hnn_core import simulate_dipole from hnn_core.network import pick_connection + from skopt import gp_minimize -from skopt.utils import use_named_args -from skopt.space import Real from scipy.optimize import fmin_cobyla ======= @@ -25,7 +28,7 @@ class Optimizer: - def __init__(self, net, constraints, solver, metric): + def __init__(self, net, constraints, solver, obj_fun): self.net = net self.constraints = 
 constraints
         if solver == 'bayesian':
@@ -34,14 +37,17 @@ def __init__(self, net, constraints, solver, metric):
         elif solver == 'cobyla':
             self._get_params = _get_params_cobyla
             self._run_opt = _run_opt_cobyla
-        if metric == 'evoked':
-            self.metric = _rmse_evoked
-        elif metric == 'rhythmic':
-            self.metric = _rmse_rhythmic
+        if obj_fun == 'evoked':
+            self.obj_fun = _rmse_evoked
+        elif obj_fun == 'rhythmic':
+            self.obj_fun = _rmse_rhythmic
+        elif obj_fun == 'poisson':
+            self.obj_fun = _rmse_poisson
         self.net_ = None
-        self.error_values = None
+        self.obj = list()
         self.opt_params = None
 <<<<<<< HEAD
+<<<<<<< HEAD
 <<<<<<< HEAD
 
     def fit(self, target_statistic, window_len=None):
@@ -53,17 +59,30 @@ def fit(self, target_statistic, window_len = None):
 
     def fit(self, target_statistic, window_len=None):
 >>>>>>> 2f308f8 (added pep8 formatting)
+=======
+        self.max_iter = 200
+
+    def __repr__(self):
+        class_name = self.__class__.__name__
+        return '<%s>' % (class_name,)
+
+    def fit(self, target_statistic):
+>>>>>>> 672ce00 (Address comments for more generalized routine)
         """ ...
 
         Parameters
         ----------
-        target_statistic : dpl
-        window_len : for smoothing dpl
+        target_statistic :
+            dpl
+        window_len : float
+            for smoothing dpl
         """
 <<<<<<< HEAD
 <<<<<<< HEAD
 =======
 >>>>>>> 2f308f8 (added pep8 formatting)
+<<<<<<< HEAD
 
         init_params, cons = self._get_params(self.net, self.constraints)
         opt_params, error_values = self._run_opt(self.net,
                                                  init_params,
                                                  cons,
                                                  self.metric,
                                                  target_statistic,
                                                  window_len)
+=======
+        params = self._get_params(self.net, self.constraints)
+        opt_params, obj, net_ = self._run_opt(self.net,
+                                              params,
+                                              self.obj_fun,
+                                              target_statistic,
+                                              self.max_iter)
+>>>>>>> 672ce00 (Address comments for more generalized routine)
         self.opt_params = opt_params
-        self.error_values = error_values
-        # self.net_ = _set_params(opt_params)
+        self.obj = obj
+        self.net_ = net_
        return
 
-    def plot_convergence():
-        return
+    def plot_convergence(self, ax=None, show=True):
+        """Convergence plot.
+
+        Parameters
+        ----------
+        ax : instance of matplotlib Axes | None
+            The matplotlib axis.
+        show : bool
+            If True, show the figure.
+
+        Returns
+        -------
+        fig : instance of matplotlib Figure
+            The matplotlib figure handle.
+        """
+        import matplotlib.pyplot as plt
+
+        if ax is None:
+            _, ax = plt.subplots(constrained_layout=True)
+        axis = ax
+
+        x = list(range(1, self.max_iter + 1))
+        y_max = max(self.obj) + 0.01
+
+        axis.plot(x, self.obj, color='black')
+        axis.set_ylim([-.01, y_max])
+        axis.set_title('Convergence')
+        axis.set_xlabel('Number of calls')
+        axis.set_ylabel('Objective value')
+        axis.grid(visible=True)
+
+        # retrieve the figure from the axes so that a user-supplied
+        # axis also works
+        fig = axis.get_figure()
+        if show:
+            fig.show()
+        return fig
 
     def plot_param_search():
         return
 
 
+def _get_params(net, constraints):
+    """Gets parameters.
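+
+    Collects, for every drive listed in ``constraints``, the names of
+    the parameters to optimize together with their initial values
+    (taken from the network) and their [min, max] bounds.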
+ + Parameters + ---------- + net : Network + constraints : + {'drive_name': {'param_name': [min, max], + 'param_name': [min, max], + 'param_name': [min, max]}} + + Returns + ------- + params : dictionary + params['initial'] : list + params['constraints'] : list of tuples (min, max) + + params_to_optim : dictionary + {drive_name: ['param_name', 'param_name', 'param_name']} + might use this later to override net params in _set_params + (might have to go back to weights_ampa instead of ampa) + """ + params = dict() + + # Get params to optimize + param_names = dict() + for drive_name in constraints: + temp = list() + for param_name in constraints[drive_name]: + temp.append(param_name) + param_names.update({drive_name: temp}) + params.update({'names': param_names}) + + # Get initial params (bayesian & cobyla can use the same format) + initial_params = list() + cons = list() + + # get net drive names + drive_names = [key for key in net.external_drives.keys()] + + for drive_name in param_names: + # get relevant params + if drive_name in drive_names: + for param_name in param_names[drive_name]: + # instead check obj_fun + if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', + 'burst_std'): + initial_params.append(net.external_drives[drive_name] + ['dynamics'][param_name]) + cons.append(constraints[drive_name][param_name]) + elif param_name in ('ampa', 'nmda'): + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: + # L5_pyramidal, L2_basket, L5_basket, L2_pyramidal + target_receptor = net.connectivity[conn_idx]\ + ['receptor'] + if target_receptor == param_name: + initial_params.append(net.connectivity[conn_idx] + ['nc_dict']['A_weight']) + cons.append(constraints[drive_name][param_name]) + + params.update({'initial': initial_params}) + params.update({'constraints': cons}) + return params + + def _get_params_bayesian(net, constraints): +<<<<<<< HEAD <<<<<<< HEAD """Assembles constraints & initial parameters as required by gp_minimize. >>>>>>> df86a5d (Draft opt class and functions based on comments) ======= """Assembles constraints & initial parameters as required by gp_minimize. +======= + """Assembles constraints in format required by gp_minimize. +>>>>>>> 672ce00 (Address comments for more generalized routine) >>>>>>> 2f308f8 (added pep8 formatting) Parameters ---------- +<<<<<<< HEAD net : the Network object constraints : the user-defined constraints <<<<<<< HEAD @@ -314,59 +444,135 @@ def _obj_func(params_cobyla): >>>>>>> 2f308f8 (added pep8 formatting) init_params = list() cons = list() +======= + net : Network + The network object. + constraints : list of lists + Constraints for each parameter to be optimized ([min, max]). 
- for drive_name in net.external_drives.keys(): - for cons_idx, cons_key in enumerate(constraints[drive_name]): - if cons_key == 'mu' or cons_key == 'sigma': - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][0] - x[idx]) - init_params.append( - net.external_drives[drive_name]['dynamics'][cons_key]) - elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][0] - x[idx]) - init_params.append( - net.connectivity[conn_idx]['nc_dict']['A_weight']) - return init_params, cons + Returns + ------- + params : dictionary + Contains parameter names, initial parameters, and constraints. + """ + # get initial params + params = _get_params(net, constraints) -def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, - window_len): + # assemble constraints in solver-specific format + cons_bayesian = list() + for cons in params['constraints']: + cons_bayesian.append((cons[0], cons[1])) + params.update({'constraints': cons_bayesian}) + return params - @use_named_args(dimensions=cons) - def _obj_func(**params): - return metric(net, target_statistic, window_len, **params) - max_iter = 11 +def _get_params_cobyla(net, constraints): + """Assembles constraints in format required by fmin_cobyla. + + Parameters + ---------- + net : Network + The network object. + constraints : list of lists + Constraints for each parameter to be optimized ([min, max]). + + Returns + ------- + params : dictionary + Contains parameter names, initial parameters, and constraints. + + """ + # get initial params + params = _get_params(net, constraints) + + # assemble constraints in solver-specific format + cons_cobyla = list() + for cons_idx, cons_val in enumerate(params['constraints']): + cons_cobyla.append(lambda x: + params['constraints'][cons_idx][1] - x[cons_idx]) + cons_cobyla.append(lambda x: + x[cons_idx] - params['constraints'][cons_idx][0]) + params.update({'constraints': cons_cobyla}) + return params + + +def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): + """Uses gp_minimize optimizer. + + Parameters + ---------- + net : Network + params : dictionary + Contains parameter names, initial parameters, and constraints. + obj_fun : func + The objective function. + target_statistic : Dipole + The target statistic. + max_iter : int + Max number of calls. + + Returns + ------- + opt_params : list + Final parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. + """ + def _obj_func(predicted_params): + return obj_fun(net, + params['names'], + target_statistic, + predicted_params) +>>>>>>> 672ce00 (Address comments for more generalized routine) + opt_results = gp_minimize(func=_obj_func, - dimensions=cons, + dimensions=params['constraints'], acq_func='EI', n_calls=max_iter, - x0=init_params, + x0=params['initial'], random_state=64) opt_params = opt_results.x - # get net_ - # ... 
- error_values = [np.min(opt_results.func_vals[:i]) - for i in range(1, max_iter + 1)] - return opt_params, error_values + obj = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + # get optimized net + net_ = _set_params(net, params['names'], opt_params) + return opt_params, obj, net_ -def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, - window_len): +def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): + """Uses fmin_cobyla optimizer. - def _obj_func(params_cobyla): - return metric(net=net, target_statistic=target_statistic, - window_len=window_len, params_cobyla=params_cobyla) + Parameters + ---------- + net : Network + params : dictionary + Contains parameter names, initial parameters, and constraints. + obj_fun : func + The objective function. + target_statistic : Dipole + The target statistic. + max_iter : int + Max number of calls. + + Returns + ------- + opt_params : list + Final parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. + """ + def _obj_func(predicted_params): + return obj_fun(net, + params['names'], + target_statistic, + predicted_params) - max_iter = 11 opt_results = fmin_cobyla(_obj_func, +<<<<<<< HEAD <<<<<<< HEAD cons = cons, rhobeg = 0.1, @@ -377,13 +583,17 @@ def _obj_func(params_cobyla): >>>>>>> df86a5d (Draft opt class and functions based on comments) ======= cons=cons, +======= + cons=params['constraints'], +>>>>>>> 672ce00 (Address comments for more generalized routine) rhobeg=0.1, rhoend=1e-4, - x0=init_params, + x0=params['initial'], maxfun=max_iter, catol=0.0) >>>>>>> 2f308f8 (added pep8 formatting) opt_params = opt_results +<<<<<<< HEAD # get net_ # ... error_values = list() @@ -391,10 +601,15 @@ def _obj_func(params_cobyla): <<<<<<< HEAD <<<<<<< HEAD +======= + obj = list() + # get optimized net + net_ = _set_params(net, params['names'], opt_params) + return opt_params, obj, net_ +>>>>>>> 672ce00 (Address comments for more generalized routine) -def _get_fixed_params(net): - """Gets fixed params. +<<<<<<< HEAD ======= def _get_fixed_params(net): @@ -550,13 +765,18 @@ def _get_predicted_params(net, **params): def _set_params(net, fixed_params, predicted_params): +======= +def _set_params(net, param_names, predicted_params): +>>>>>>> 672ce00 (Address comments for more generalized routine) """Sets the network parameters. Parameters ---------- - net : the Network object - fixed_params : unchanging network parameters - predicted_params : the parameters predicted by the optimizer + net : Network + param_names : dictionary + Parameters to change. + predicted_params : list + The parameters selected by the optimizer. 
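One subtlety worth noting for the COBYLA path assembled above: fmin_cobyla treats a constraint as satisfied when its function returns a non-negative value, and Python closures bind loop variables late, so each lambda should capture its own index and bounds as default arguments (as the earlier toy script did). A hedged sketch of that pattern, assuming params['constraints'] holds (min, max) pairs:

# Default arguments freeze the loop variables at definition time, so every
# constraint function keeps its own parameter index and bounds.
cons_cobyla = list()
for cons_idx, (lower, upper) in enumerate(params['constraints']):
    cons_cobyla.append(lambda x, idx=cons_idx, ub=upper: ub - x[idx])
    cons_cobyla.append(lambda x, idx=cons_idx, lb=lower: x[idx] - lb)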
======= conn_idxs = pick_connection(net, src_gids = drive_name) @@ -595,7 +815,7 @@ def _set_params(net, fixed_params, predicted_params): >>>>>>> 2f308f8 (added pep8 formatting) Returns ------- - net : Network object + net : Network """ <<<<<<< HEAD <<<<<<< HEAD @@ -662,57 +882,45 @@ def _rmse_evoked(net, target_statistic, window_len, **params): >>>>>>> 2f308f8 (added pep8 formatting) net_new = net.copy() - # remove existing drives, re-set them with updated parameters - for drive_name in net.external_drives.keys(): - # clear drive - del net_new.external_drives[drive_name] - # clear connectivity - conn_idxs = pick_connection(net_new, src_gids=drive_name) - net_new.connectivity = [conn for conn_idx, conn - in enumerate(net_new.connectivity) - if conn_idx not in conn_idxs] - net_new.add_evoked_drive(name=drive_name, - mu=predicted_params[drive_name]['mu'], - sigma=predicted_params[drive_name]['sigma'], - numspikes=fixed_params[drive_name] - ['numspikes'], - location=fixed_params[drive_name]['location'], - n_drive_cells=fixed_params[drive_name] - ['n_drive_cells'], - cell_specific=fixed_params[drive_name] - ['cell_specific'], - weights_ampa=predicted_params[drive_name] - ['ampa_weights'], - weights_nmda=predicted_params[drive_name] - ['nmda_weights'], - space_constant=fixed_params[drive_name] - ['space_constant'], - synaptic_delays=fixed_params[drive_name] - ['synaptic_delays'], - probability=fixed_params[drive_name] - ['probability'], - event_seed=fixed_params[drive_name] - ['event_seed'], - conn_seed=fixed_params[drive_name] - ['conn_seed']) - return net_new - -def _get_opt_net(opt_params, fixed_params): - """Returns the optimized network net_.""" - return + # get net drive names + count = 0 + drive_names = [key for key in net_new.external_drives.keys()] + for drive_name in param_names: + + # set relevant params + if drive_name in drive_names: + for param_name in param_names[drive_name]: + if param_name in ('mu', 'sigma'): + net_new.external_drives[drive_name]['dynamics']\ + [param_name] = predicted_params[count] + count += 1 + + elif param_name in ('ampa', 'nmda'): + conn_idxs = pick_connection(net_new, src_gids=drive_name) + for conn_idx in conn_idxs: + target_receptor = net_new.connectivity[conn_idx]\ + ['receptor'] + if target_receptor == param_name: + net_new.connectivity[conn_idx]['nc_dict']\ + ['A_weight'] = predicted_params[count] + count += 1 + return net_new # These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, target_statistic, window_len, **params): - # params_cobyla = None +def _rmse_evoked(net, param_names, target_statistic, predicted_params): """The objective function for evoked responses. Parameters ----------- - net : the Network object - target_dpl : the recorded dipole - params : the constraints + net : Network + param_names : dictionary + Parameters to change. + target_statistic : Dipole + The recorded dipole. + predicted_params : list + Parameters selected by the optimizer. 
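The error computed in the body that follows is a range-normalized RMSE. A standalone sketch of the same quantity, assuming plain NumPy arrays of equal length:

import numpy as np

def nrmse(simulated, target, times):
    # RMSE divided by the target's peak-to-peak range, as used below
    rmse = np.sqrt(((simulated - target) ** 2).sum() / len(times))
    return rmse / (target.max() - target.min())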
Returns ------- @@ -724,6 +932,7 @@ def _rmse_evoked(net, target_statistic, window_len, **params): >>>>>>> 2f308f8 (added pep8 formatting) """ +<<<<<<< HEAD fixed_params = _get_fixed_params(net) # get predicted params <<<<<<< HEAD @@ -773,6 +982,20 @@ def _rmse_rhythmic(): ======= dpl.scale(scaling_factor) >>>>>>> 2f308f8 (added pep8 formatting) +======= + # get network with predicted params + new_net = _set_params(net, param_names, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + # smooth & scale + # if dpl.smooth(window_len): + # dpl_smooth = dpl.copy().smooth(window_len) + dpl.smooth(30) + # scaling_factor = get from target_statistic + # dpl.scale(scaling_factor) + +>>>>>>> 672ce00 (Address comments for more generalized routine) # calculate error rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times)) / (max(target_statistic.data['agg']) @@ -780,9 +1003,47 @@ def _rmse_rhythmic(): return rmse +<<<<<<< HEAD <<<<<<< HEAD >>>>>>> df86a5d (Draft opt class and functions based on comments) ======= def _rmse_rhythmic(): +======= +def _rmse_rhythmic(net, param_names, target_statistic, predicted_params): + """The objective function for evoked responses. + + Parameters + ----------- + net : Network + param_names : dictionary + Parameters to change. + target_statistic : Dipole + The recorded dipole. + predicted_params : list + Parameters selected by the optimizer. + + Returns + ------- + rmse : norm + """ + + from scipy import signal + + # expose these + fmin = 0.0 + fmax = 200.0 + + new_net = _set_params(net, param_names, predicted_params) + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + f_target, psd_target = signal.periodogram(target_statistic.data['agg']) + f_simulated, psd_simulated = signal.periodogram(dpl.data['agg']) + + rmse = np.linalg.norm(psd_target - psd_simulated) + return rmse + + +def _rmse_poisson(): +>>>>>>> 672ce00 (Address comments for more generalized routine) return >>>>>>> 2f308f8 (added pep8 formatting) From 4db96ddffc6b9589322953eed71bd4d5e4c00091 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 29 Jun 2023 19:31:09 +0200 Subject: [PATCH 09/63] added methods to remove 1/f and compute psd --- hnn_core/opt_toy_example/general.py | 117 ++++++++++++--- hnn_core/opt_toy_example/metrics.py | 225 ++++++++++++++++++++++++++++ 2 files changed, 324 insertions(+), 18 deletions(-) create mode 100644 hnn_core/opt_toy_example/metrics.py diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index a2c412efd..d595c3318 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -4,10 +4,15 @@ <<<<<<< HEAD ======= +<<<<<<< HEAD >>>>>>> 672ce00 (Address comments for more generalized routine) from hnn_core import simulate_dipole +======= +>>>>>>> 18ac228 (added methods to remove 1/f and compute psd) from hnn_core.network import pick_connection +from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson, _compute_welch_psd, _compute_multitaper_psd, _remove_aperiodic_foof, _remove_aperiodic_irasa + from skopt import gp_minimize from scipy.optimize import fmin_cobyla @@ -28,21 +33,38 @@ class Optimizer: - def __init__(self, net, constraints, solver, obj_fun): + def __init__(self, net, constraints, solver, obj_fun, psd_method=None, + aperiodic_method=None): self.net = net self.constraints = constraints + # Optimizer method if solver == 'bayesian': self._get_params = _get_params_bayesian self._run_opt = 
_run_opt_bayesian elif solver == 'cobyla': self._get_params = _get_params_cobyla self._run_opt = _run_opt_cobyla + # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked + self.obj_fun_type = 'evoked' elif obj_fun == 'rhythmic': self.obj_fun = _rmse_rhythmic + self.obj_fun_type = 'rhythmic' elif obj_fun == 'poisson': self.obj_fun = _rmse_poisson + # Methods to compute PSD (for rhythmic responses only) + self._compute_psd = None + if psd_method == 'welch': + self._compute_psd = _compute_welch_psd + elif psd_method == 'multitaper': + self._compute_psd = _compute_multitaper_psd + # Methods to remove aperiodic component (for rhythmic responses only) + self._remove_aperiodic = None + if aperiodic_method == 'fooof': + self._remove_aperiodic = _remove_aperiodic_foof + elif aperiodic_method == 'irasa': + self._remove_aperiodic = _remove_aperiodic_irasa self.net_ = None self.obj = list() self.opt_params = None @@ -66,16 +88,25 @@ def __repr__(self): class_name = self.__class__.__name__ return '<%s | %s>' % (class_name) +<<<<<<< HEAD def fit(self, target_statistic): >>>>>>> 672ce00 (Address comments for more generalized routine) +======= + def fit(self, target_statistic, sfreq): +>>>>>>> 18ac228 (added methods to remove 1/f and compute psd) """ ... Parameters ---------- - target_statistic : - dpl + target_statistic : ndarray + Recorded dipole (must have the same amount of data points as + the initial, simulated dipole). window_len : float - for smoothing dpl + ... + sfreq : float + Sampling frequency of recorded dipole (must be the same as the + sampling frequency of the initial, simulated + dipole). """ <<<<<<< HEAD <<<<<<< HEAD @@ -120,8 +151,16 @@ def _get_params_bayesian(net, constraints): params, self.obj_fun, target_statistic, +<<<<<<< HEAD self.max_iter) >>>>>>> 672ce00 (Address comments for more generalized routine) +======= + self.max_iter, + self.obj_fun_type, + self._remove_aperiodic, + self._compute_psd, + sfreq) +>>>>>>> 18ac228 (added methods to remove 1/f and compute psd) self.opt_params = opt_params self.obj = obj self.net_ = net_ @@ -133,15 +172,16 @@ def plot_convergence(self, ax=None, show=True): Parameters ---------- ax : instance of matplotlib figure | None - The matplotlib axis + The matplotlib axis. show : bool - If True, show the figure + If True, show the figure. Returns - ------- - fig : instance of plt.fig - The matplotlib figure handle. + ------- + fig : instance of plt.fig + The matplotlib figure handle. """ + import matplotlib as mpl import matplotlib.pyplot as plt @@ -173,7 +213,7 @@ def _get_params(net, constraints): Parameters ---------- net : Network - constraints : + constraints : dictionary {'drive_name': {'param_name': [min, max], 'param_name': [min, max], 'param_name': [min, max]}} @@ -189,6 +229,7 @@ def _get_params(net, constraints): might use this later to override net params in _set_params (might have to go back to weights_ampa instead of ampa) """ + params = dict() # Get params to optimize @@ -456,6 +497,7 @@ def _obj_func(params_cobyla): Contains parameter names, initial parameters, and constraints. """ + # get initial params params = _get_params(net, constraints) @@ -483,6 +525,7 @@ def _get_params_cobyla(net, constraints): Contains parameter names, initial parameters, and constraints. 
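For orientation, a self-contained toy run of gp_minimize using the same (min, max) bound format assembled here; the quadratic objective is a stand-in for the simulation-based one:

from skopt import gp_minimize

def toy_objective(x):
    # stand-in for the dipole-simulation objective
    return (x[0] - 2.0) ** 2

toy_results = gp_minimize(func=toy_objective,
                          dimensions=[(0.01, 10.)],  # one (min, max) per parameter
                          acq_func='EI',
                          n_calls=15,
                          x0=[5.0],
                          random_state=64)
print(toy_results.x[0], toy_results.fun)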
""" + # get initial params params = _get_params(net, constraints) @@ -497,7 +540,8 @@ def _get_params_cobyla(net, constraints): return params -def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): +def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, + obj_fun_type, remove_aperiodic, compute_psd, sfreq): """Uses gp_minimize optimizer. Parameters @@ -507,10 +551,18 @@ def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): Contains parameter names, initial parameters, and constraints. obj_fun : func The objective function. - target_statistic : Dipole - The target statistic. + target_statistic : ndarray + The recorded dipole. max_iter : int Max number of calls. + obj_fun_type : string + Evoked or rhythmic. + remove_aperiodic : + ... + compute_psd : + ... + sfreq : + ... Returns ------- @@ -521,12 +573,23 @@ def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): net_ : Network Optimized network object. """ + + # if response type is rhythmic, first remove 1/f + if obj_fun_type == 'rhythmic': + target_statistic = remove_aperiodic(target_statistic, compute_psd, + sfreq) # psd + def _obj_func(predicted_params): return obj_fun(net, params['names'], target_statistic, +<<<<<<< HEAD predicted_params) >>>>>>> 672ce00 (Address comments for more generalized routine) +======= + predicted_params, + compute_psd) +>>>>>>> 18ac228 (added methods to remove 1/f and compute psd) opt_results = gp_minimize(func=_obj_func, dimensions=params['constraints'], @@ -541,7 +604,8 @@ def _obj_func(predicted_params): return opt_params, obj, net_ -def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): +def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, + obj_fun_type, remove_aperiodic, compute_psd, sfreq): """Uses fmin_cobyla optimizer. Parameters @@ -551,10 +615,18 @@ def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): Contains parameter names, initial parameters, and constraints. obj_fun : func The objective function. - target_statistic : Dipole - The target statistic. + target_statistic : ndarray + The recorded dipole. max_iter : int Max number of calls. + obj_fun_type : string + Evoked or rhythmic. + remove_aperiodic : + ... + compute_psd : + ... + sfreq : + ... Returns ------- @@ -565,11 +637,18 @@ def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): net_ : Network Optimized network object. 
""" + + # if response type is rhythmic, first remove 1/f + if obj_fun_type == 'rhythmic': + target_statistic = remove_aperiodic(target_statistic, compute_psd, + sfreq) + def _obj_func(predicted_params): return obj_fun(net, params['names'], target_statistic, - predicted_params) + predicted_params, + compute_psd) opt_results = fmin_cobyla(_obj_func, <<<<<<< HEAD @@ -895,7 +974,6 @@ def _rmse_evoked(net, target_statistic, window_len, **params): net_new.external_drives[drive_name]['dynamics']\ [param_name] = predicted_params[count] count += 1 - elif param_name in ('ampa', 'nmda'): conn_idxs = pick_connection(net_new, src_gids=drive_name) for conn_idx in conn_idxs: @@ -906,6 +984,7 @@ def _rmse_evoked(net, target_statistic, window_len, **params): ['A_weight'] = predicted_params[count] count += 1 return net_new +<<<<<<< HEAD # These 2 functions will go in a file called metrics.py @@ -1047,3 +1126,5 @@ def _rmse_poisson(): >>>>>>> 672ce00 (Address comments for more generalized routine) return >>>>>>> 2f308f8 (added pep8 formatting) +======= +>>>>>>> 18ac228 (added methods to remove 1/f and compute psd) diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/opt_toy_example/metrics.py new file mode 100644 index 000000000..ece9639e6 --- /dev/null +++ b/hnn_core/opt_toy_example/metrics.py @@ -0,0 +1,225 @@ +from hnn_core import simulate_dipole + + +def _rmse_evoked(net, param_names, target_statistic, predicted_params, + compute_psd): + """The objective function for evoked responses. + + Parameters + ----------- + net : Network + param_names : dictionary + Parameters to change. + target_statistic : ndarray + The recorded dipole. + predicted_params : list + Parameters selected by the optimizer. + compute_psd : + ... + + Returns + ------- + rmse : normalized RMSE between recorded and simulated dipole + """ + + # get network with predicted params + new_net = _set_params(net, param_names, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + # smooth & scale (scale must be passed in by user, window length?) + dpl.smooth(30) + # dpl.scale(300) + + # calculate error + rmse = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() + / len(dpl.times)) / (max(target_statistic) + - min(target_statistic)) + return rmse + + +def _rmse_rhythmic(net, param_names, psd_target_statistic, predicted_params, + compute_psd): + """The objective function for evoked responses. + + Parameters + ----------- + net : Network + param_names : dictionary + Parameters to change. + psd_target_statistic : ndarray + PSD of recorded dipole minus the aperiodic component. + predicted_params : list + Parameters selected by the optimizer. + freq_range : list + Frequency range wihtin which to optimize the response (fmin, fmax). + compute_psd : + ... + + Returns + ------- + rmse : normalized RMSE between recorded and simulated dipole + """ + + # expose these + fmin = 0.0 + fmax = 200.0 + + # simulate dpl with predicted params + new_net = _set_params(net, param_names, predicted_params) + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + # get psd of simulated dpl + freqs_simulated, psd_simulated = compute_psd(dpl.data['agg'], dpl.sfreq) + + # calculate error + rmse = np.sqrt(((psd_simulated - psd_target_statistic)**2).sum() + / len(dpl.times)) / (max(psd_target_statistic) + - min(psd_target_statistic)) + + return rmse + + +def _rmse_poisson(): + return + + +def _compute_welch_psd(target_statistic, sfreq): + """ Computes the PSD usig scipy.signal.welch. 
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole. scipy.signal.welch takes the input signal in
+        the time domain.
+    sfreq :
+        ...
+
+    Returns
+    -------
+    freqs : ndarray
+        The frequency points in Hz.
+    psd : ndarray
+        Power spectral density.
+    """
+
+    from scipy.signal import welch
+
+    freqs, psd = welch(target_statistic, fs=sfreq)
+
+    return freqs, psd
+
+
+def _compute_multitaper_psd(target_statistic, sfreq):
+    """ Computes the PSD using mne.time_frequency.psd_array_multitaper.
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole. psd_array_multitaper takes the input signal in
+        the time domain.
+    sfreq :
+        ...
+
+    Returns
+    -------
+    freqs : array
+        The frequency points in Hz.
+    psd : ndarray
+        Power spectral density.
+    """
+
+    from mne.time_frequency import psd_array_multitaper
+
+    psd, freqs = psd_array_multitaper(target_statistic, sfreq=sfreq)
+
+    return freqs, psd
+
+
+def _remove_aperiodic_foof(target_statistic, compute_psd, sfreq):
+    """Gets periodic and aperiodic parameters. Calculates the aperiodic
+    PSD from the Lorentzian function. Subtracts the aperiodic PSD from the
+    neural power spectra (PSD of the target statistic).
+
+    Relies on the Fitting Oscillations and one-over-f (FOOOF) method
+    (Donoghue et al 2020) to model the PSD of EEG/MEG signals as a combination
+    of an aperiodic component (1/f) and at least one periodic component
+    (oscillations).
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole.
+    compute_psd :
+        ...
+    sfreq :
+        ...
+
+    Returns
+    -------
+    clean_psd : ndarray
+        Neural power spectra minus PSD of the aperiodic approximation.
+
+    """
+
+    from fooof import FOOOF
+    import numpy as np
+
+    # get PSD of target statistic
+    freqs, psd = compute_psd(target_statistic, sfreq)
+
+    # fit FOOOF model
+    fm = FOOOF(aperiodic_mode='knee')
+    fm.fit(freqs, psd)
+
+    # get aperiodic parameters
+    offset, knee, exponent = fm.aperiodic_params_[0],\
+        fm.aperiodic_params_[1],\
+        fm.aperiodic_params_[2]
+
+    # compute aperiodic component
+    aperiodic_component = offset - np.log10(knee + (psd**exponent))
+
+    clean_psd = psd - aperiodic_component
+
+    return clean_psd
+
+
+def _remove_aperiodic_irasa(target_statistic, compute_psd, sfreq):
+    """Gets periodic and aperiodic PSD components. Subtracts the aperiodic
+    PSD from the neural power spectra (PSD of the target statistic).
+
+    Relies on the Irregular-Resampling Auto-Spectral Analysis (IRASA) method
+    (Wen & Liu 2016) to separate the aperiodic (1/f) and periodic components
+    (oscillations) in the PSD of EEG signals.
+
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole.
+    compute_psd :
+        ...
+    sfreq :
+        ...
+
+    Returns
+    -------
+    clean_psd : ndarray
+        Neural power spectra minus PSD of the aperiodic approximation.
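One detail worth pinning down for the FOOOF-based reconstruction above: the 'knee' aperiodic model is a function of frequency, log10(AP(f)) = offset - log10(knee + f**exponent), and the fit lives in log10 power. A hedged standalone sketch (assuming the fooof package is installed) of flattening a spectrum with that model:

import numpy as np
from fooof import FOOOF

def flatten_spectrum(freqs, psd):
    # drop the DC bin, where log10 power is undefined
    freqs, psd = freqs[1:], psd[1:]
    fm = FOOOF(aperiodic_mode='knee')
    fm.fit(freqs, psd)
    offset, knee, exponent = fm.aperiodic_params_
    # log10 of the aperiodic fit: offset - log10(knee + f**exponent)
    log_aperiodic = offset - np.log10(knee + freqs ** exponent)
    # subtract in log10 power, where the two terms share units
    return np.log10(psd) - log_aperiodic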
+ + """ + + import yasa + + # get PSD of target statistic + freqs, psd = compute_psd(target_statistic, sfreq) + + # get aperiodic component + f, aperiodic_component, periodic_component = yasa.irasa(data=target_statistic, + sf=sfreq, + return_fit=False) + + clean_psd = psd - aperiodic_component + + return clean_psd From 4cd4f4ab4b1e11cd9fa04de0ec8b49f6c91cbbd2 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 12 Jul 2023 14:09:33 -0400 Subject: [PATCH 10/63] add optimize rhythmic function --- hnn_core/opt_toy_example/general.py | 90 +++----- hnn_core/opt_toy_example/metrics.py | 200 +++--------------- .../opt_toy_example/remove_aperiodic_funcs.py | 139 ++++++++++++ 3 files changed, 206 insertions(+), 223 deletions(-) create mode 100644 hnn_core/opt_toy_example/remove_aperiodic_funcs.py diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index d595c3318..3e0868ab4 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -11,7 +11,7 @@ >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) from hnn_core.network import pick_connection -from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson, _compute_welch_psd, _compute_multitaper_psd, _remove_aperiodic_foof, _remove_aperiodic_irasa +from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson from skopt import gp_minimize from scipy.optimize import fmin_cobyla @@ -33,8 +33,8 @@ class Optimizer: - def __init__(self, net, constraints, solver, obj_fun, psd_method=None, - aperiodic_method=None): + def __init__(self, net, constraints, solver, obj_fun, f_bands=None, + weights=None): self.net = net self.constraints = constraints # Optimizer method @@ -47,29 +47,18 @@ def __init__(self, net, constraints, solver, obj_fun, psd_method=None, # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked - self.obj_fun_type = 'evoked' elif obj_fun == 'rhythmic': self.obj_fun = _rmse_rhythmic - self.obj_fun_type = 'rhythmic' + self.f_bands = f_bands + self.weights = weights elif obj_fun == 'poisson': self.obj_fun = _rmse_poisson - # Methods to compute PSD (for rhythmic responses only) - self._compute_psd = None - if psd_method == 'welch': - self._compute_psd = _compute_welch_psd - elif psd_method == 'multitaper': - self._compute_psd = _compute_multitaper_psd - # Methods to remove aperiodic component (for rhythmic responses only) - self._remove_aperiodic = None - if aperiodic_method == 'fooof': - self._remove_aperiodic = _remove_aperiodic_foof - elif aperiodic_method == 'irasa': - self._remove_aperiodic = _remove_aperiodic_irasa self.net_ = None self.obj = list() self.opt_params = None <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic, window_len=None): @@ -83,17 +72,24 @@ def fit(self, target_statistic, window_len=None): >>>>>>> 2f308f8 (added pep8 formatting) ======= self.max_iter = 200 +======= + self.max_iter = 150 +>>>>>>> a4f67f6 (add optimize rhythmic function) def __repr__(self): class_name = self.__class__.__name__ return '<%s | %s>' % (class_name) +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic): >>>>>>> 672ce00 (Address comments for more generalized routine) ======= def fit(self, target_statistic, sfreq): >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) +======= + def fit(self, target_statistic): +>>>>>>> a4f67f6 (add optimize rhythmic function) """ ... 
Parameters @@ -101,12 +97,8 @@ def fit(self, target_statistic, sfreq): target_statistic : ndarray Recorded dipole (must have the same amount of data points as the initial, simulated dipole). - window_len : float + scaling_factor : float ... - sfreq : float - Sampling frequency of recorded dipole (must be the same as the - sampling frequency of the initial, simulated - dipole). """ <<<<<<< HEAD <<<<<<< HEAD @@ -156,11 +148,16 @@ def _get_params_bayesian(net, constraints): >>>>>>> 672ce00 (Address comments for more generalized routine) ======= self.max_iter, +<<<<<<< HEAD self.obj_fun_type, self._remove_aperiodic, self._compute_psd, sfreq) >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) +======= + self.f_bands, + self.weights) +>>>>>>> a4f67f6 (add optimize rhythmic function) self.opt_params = opt_params self.obj = obj self.net_ = net_ @@ -191,10 +188,11 @@ def plot_convergence(self, ax=None, show=True): axis = ax if isinstance(ax, mpl.axes._axes.Axes) else ax x = list(range(1, self.max_iter + 1)) + y_min = min(self.obj) - 0.01 y_max = max(self.obj) + 0.01 axis.plot(x, self.obj, color='black') - axis.set_ylim([-.01, y_max]) + axis.set_ylim([y_min, y_max]) axis.set_title('Convergence') axis.set_xlabel('Number of calls') axis.set_ylabel('Objective value') @@ -252,7 +250,6 @@ def _get_params(net, constraints): # get relevant params if drive_name in drive_names: for param_name in param_names[drive_name]: - # instead check obj_fun if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', 'burst_std'): initial_params.append(net.external_drives[drive_name] @@ -541,7 +538,7 @@ def _get_params_cobyla(net, constraints): def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, - obj_fun_type, remove_aperiodic, compute_psd, sfreq): + f_bands, weights): """Uses gp_minimize optimizer. Parameters @@ -555,14 +552,6 @@ def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, The recorded dipole. max_iter : int Max number of calls. - obj_fun_type : string - Evoked or rhythmic. - remove_aperiodic : - ... - compute_psd : - ... - sfreq : - ... Returns ------- @@ -574,11 +563,6 @@ def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, Optimized network object. """ - # if response type is rhythmic, first remove 1/f - if obj_fun_type == 'rhythmic': - target_statistic = remove_aperiodic(target_statistic, compute_psd, - sfreq) # psd - def _obj_func(predicted_params): return obj_fun(net, params['names'], @@ -588,8 +572,14 @@ def _obj_func(predicted_params): >>>>>>> 672ce00 (Address comments for more generalized routine) ======= predicted_params, +<<<<<<< HEAD compute_psd) >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) +======= + f_bands, + weights, + _set_params) +>>>>>>> a4f67f6 (add optimize rhythmic function) opt_results = gp_minimize(func=_obj_func, dimensions=params['constraints'], @@ -604,8 +594,8 @@ def _obj_func(predicted_params): return opt_params, obj, net_ -def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, - obj_fun_type, remove_aperiodic, compute_psd, sfreq): +def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, f_bands, + weights): """Uses fmin_cobyla optimizer. Parameters @@ -619,14 +609,6 @@ def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, The recorded dipole. max_iter : int Max number of calls. - obj_fun_type : string - Evoked or rhythmic. - remove_aperiodic : - ... - compute_psd : - ... - sfreq : - ... 
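The rhythmic objective introduced in this revision scores how much simulated spectral power falls inside the user-supplied frequency bands. A standalone sketch of that band-power score; the band edges and weights below are hypothetical examples:

import numpy as np
from scipy.signal import periodogram

def band_power_score(signal, sfreq, f_bands=((8., 12.), (18., 26.)),
                     weights=(1., 1.)):
    freqs, psd = periodogram(signal, sfreq, window='hamming')
    score = 0.
    for (fmin, fmax), weight in zip(f_bands, weights):
        mask = np.logical_and(freqs >= fmin, freqs <= fmax)
        # negative sign: more relative power in a band lowers the objective
        score += -weight * psd[mask].sum() / psd.sum()
    return score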
Returns ------- @@ -638,17 +620,14 @@ def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, Optimized network object. """ - # if response type is rhythmic, first remove 1/f - if obj_fun_type == 'rhythmic': - target_statistic = remove_aperiodic(target_statistic, compute_psd, - sfreq) - def _obj_func(predicted_params): return obj_fun(net, params['names'], target_statistic, predicted_params, - compute_psd) + f_bands, + weights, + _set_params) opt_results = fmin_cobyla(_obj_func, <<<<<<< HEAD @@ -970,7 +949,8 @@ def _rmse_evoked(net, target_statistic, window_len, **params): # set relevant params if drive_name in drive_names: for param_name in param_names[drive_name]: - if param_name in ('mu', 'sigma'): + if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', + 'burst_std'): net_new.external_drives[drive_name]['dynamics']\ [param_name] = predicted_params[count] count += 1 diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/opt_toy_example/metrics.py index ece9639e6..52930d7ff 100644 --- a/hnn_core/opt_toy_example/metrics.py +++ b/hnn_core/opt_toy_example/metrics.py @@ -1,8 +1,10 @@ from hnn_core import simulate_dipole +import numpy as np +from mne.time_frequency import psd_array_multitaper def _rmse_evoked(net, param_names, target_statistic, predicted_params, - compute_psd): + f_bands, weights, _set_params): """The objective function for evoked responses. Parameters @@ -14,8 +16,6 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params, The recorded dipole. predicted_params : list Parameters selected by the optimizer. - compute_psd : - ... Returns ------- @@ -25,21 +25,21 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params, # get network with predicted params new_net = _set_params(net, param_names, predicted_params) # simulate dipole - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + dpl = simulate_dipole(new_net, tstop=500, n_trials=1)[0] - # smooth & scale (scale must be passed in by user, window length?) - dpl.smooth(30) - # dpl.scale(300) + # smooth & scale (scale must be passed in by user) + dpl.scale(1000) + dpl.smooth(20) - # calculate error - rmse = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() + # calculate rmse + obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() / len(dpl.times)) / (max(target_statistic) - min(target_statistic)) - return rmse + return obj -def _rmse_rhythmic(net, param_names, psd_target_statistic, predicted_params, - compute_psd): +def _rmse_rhythmic(net, param_names, target_statistic, predicted_params, + f_bands, weights, _set_params): """The objective function for evoked responses. Parameters @@ -47,12 +47,10 @@ def _rmse_rhythmic(net, param_names, psd_target_statistic, predicted_params, net : Network param_names : dictionary Parameters to change. - psd_target_statistic : ndarray - PSD of recorded dipole minus the aperiodic component. + target_statistic : ndarray + Recorded dipole. predicted_params : list Parameters selected by the optimizer. - freq_range : list - Frequency range wihtin which to optimize the response (fmin, fmax). compute_psd : ... 
@@ -61,165 +59,31 @@ def _rmse_rhythmic(net, param_names, psd_target_statistic, predicted_params, rmse : normalized RMSE between recorded and simulated dipole """ - # expose these - fmin = 0.0 - fmax = 200.0 + from scipy.signal import periodogram # simulate dpl with predicted params new_net = _set_params(net, param_names, predicted_params) - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] - - # get psd of simulated dpl - freqs_simulated, psd_simulated = compute_psd(dpl.data['agg'], dpl.sfreq) - - # calculate error - rmse = np.sqrt(((psd_simulated - psd_target_statistic)**2).sum() - / len(dpl.times)) / (max(psd_target_statistic) - - min(psd_target_statistic)) - - return rmse - - -def _rmse_poisson(): - return - - -def _compute_welch_psd(target_statistic, sfreq): - """ Computes the PSD usig scipy.signal.welch. - - Parameters - ---------- - target_statistic : ndarray - The target dipole. psd_array_multitaper takes input signal in the - time-domain. - sfreq : - ... - - Returns - ------- - freqs : ndarray - The frequency points in Hz. - psd : ndarray - Power spectral density. - """ - - from scipy.signal import welch - - freqs, psd = welch(target_statistic, fs=sfreq) - - return freqs, psd - - -def _compute_multitaper_psd(target_statistic, sfreq): - """ Computes the PSD usig mne.time_frequency.psd_array_multitaper. - - Parameters - ---------- - target_statistic : ndarray - The target dipole. psd_array_multitaper takes input signal in the - time-domain. - sfreq: - ... - - Returns - ------- - freqs : array - The frequency points in Hz. - psd : ndarray - Power spectral density. - """ - - from mne.time_frequency import psd_array_multitaper - - psd, freqs = psd_array_multitaper(target_statistic, sfreq=sfreq) - - return freqs, psd + dpl = simulate_dipole(new_net, tstop=300, n_trials=1)[0] + # scale (scale must be passed in by user) + dpl.scale(1000) -def _remove_aperiodic_foof(target_statistic, compute_psd, sfreq): - """Gets periodic and aperiodic parameters. Calculates the aperiodic - PSD from the Lorentzian function. Subtracts the aperiodic PSD from the - neural power spectra (PSD of the target statistic). - - Relies on the Fitting Oscillations and one-over-f (FOOOF) method - (Donoghue et al 2020) to model the PSD of EEG/MEG signals as a combination - of a periodic component (1/f) and at least one periodic component - (oscillations). - - Parameters - ---------- - target_statistic : ndarray - The target dipole. - compute_psd : - ... - sfreq : - ... - - Returns - ------- - clean_psd : ndarray - Neural power spectra minus PSD of the aperiodic approximation. - - """ - - from fooof import FOOOF - import numpy as np - - # get PSD of target statistic - freqs, psd = compute_psd(target_statistic, sfreq) - - # fit FOOOF model - fm = FOOOF(aperiodic_mode='knee') - fm.fit(freqs, psd) - - # get aperiodic parameters - offset, knee, exponent = fm.aperiodic_params_[0],\ - fm.aperiodic_params_[1],\ - fm.aperiodic_params_[2] - - # compute aperiodic component - aperiodic_component = offset - np.log10(knee + (psd**exponent)) - - clean_psd = psd - aperiodic_component - - return clean_psd - - -def _remove_aperiodic_irasa(target_statistic, compute_psd, sfreq): - """Gets periodic and aperiodic PSD components. Subtracts the aperiodic - PSD from the neural power spectra (PSD of the target statistic). - - Relies on the Irregular-Resampling Auto-Spectral Analysis (IRASA) method - (Wen & Liu 2016) to separate the aperiodic (1/f) and periodic components - (oscillations) in the PSD of EEG signals. 
-
-
-    Parameters
-    ----------
-    target_statistic : ndarray
-        The target dipole.
-    compute_psd :
-        ...
-    sfreq :
-        ...
-
-    Returns
-    -------
-    clean_psd : ndarray
-        Neural power spectra minus PSD of the aperiodic approximation.
-
-    """
-
-    import yasa
-
-    # get PSD of target statistic
-    freqs, psd = compute_psd(target_statistic, sfreq)
-
-    # get aperiodic component
-    f, aperiodic_component, periodic_component = yasa.irasa(data=target_statistic,
-                                                            sf=sfreq,
-                                                            return_fit=False)
-
-    clean_psd = psd - aperiodic_component
-
-    return clean_psd
+def _rmse_poisson():
+    return
\ No newline at end of file
diff --git a/hnn_core/opt_toy_example/remove_aperiodic_funcs.py b/hnn_core/opt_toy_example/remove_aperiodic_funcs.py
new file mode 100644
index 000000000..b38c10b9d
--- /dev/null
+++ b/hnn_core/opt_toy_example/remove_aperiodic_funcs.py
@@ -0,0 +1,139 @@
+def _compute_welch_psd(target_statistic, sfreq):
+    """ Computes the PSD using scipy.signal.welch.
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole. scipy.signal.welch takes the input signal in
+        the time domain.
+    sfreq :
+        ...
+
+    Returns
+    -------
+    freqs : ndarray
+        The frequency points in Hz.
+    psd : ndarray
+        Power spectral density.
+    """
+
+    from scipy.signal import welch
+
+    freqs, psd = welch(target_statistic, fs=sfreq)
+
+    return freqs, psd
+
+
+def _compute_multitaper_psd(target_statistic, sfreq):
+    """ Computes the PSD using mne.time_frequency.psd_array_multitaper.
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole. psd_array_multitaper takes the input signal in
+        the time domain.
+    sfreq :
+        ...
+
+    Returns
+    -------
+    freqs : array
+        The frequency points in Hz.
+    psd : ndarray
+        Power spectral density.
+    """
+
+    from mne.time_frequency import psd_array_multitaper
+
+    psd, freqs = psd_array_multitaper(target_statistic, sfreq=sfreq)
+
+    return freqs, psd
+
+
+def _remove_aperiodic_foof(target_statistic, compute_psd, sfreq):
+    """Gets periodic and aperiodic parameters. Calculates the aperiodic
+    PSD from the Lorentzian function. Subtracts the aperiodic PSD from the
+    neural power spectra (PSD of the target statistic).
+
+    Relies on the Fitting Oscillations and one-over-f (FOOOF) method
+    (Donoghue et al 2020) to model the PSD of EEG/MEG signals as a combination
+    of an aperiodic component (1/f) and at least one periodic component
+    (oscillations).
+
+    Parameters
+    ----------
+    target_statistic : ndarray
+        The target dipole.
+    compute_psd :
+        ...
+    sfreq :
+        ...
+
+    Returns
+    -------
+    clean_psd : ndarray
+        Neural power spectra minus PSD of the aperiodic approximation.
+ + """ + + from fooof import FOOOF + import numpy as np + + # get PSD of target statistic + freqs, psd = compute_psd(target_statistic, sfreq) + + # fit FOOOF model + fm = FOOOF(aperiodic_mode='knee') + fm.fit(freqs, psd) + + # get aperiodic parameters + offset, knee, exponent = fm.aperiodic_params_[0],\ + fm.aperiodic_params_[1],\ + fm.aperiodic_params_[2] + + # compute aperiodic component + aperiodic_component = offset - np.log10(knee + (psd**exponent)) + + clean_psd = psd - aperiodic_component + + return clean_psd + + +def _remove_aperiodic_irasa(target_statistic, compute_psd, sfreq): + """Gets periodic and aperiodic PSD components. Subtracts the aperiodic + PSD from the neural power spectra (PSD of the target statistic). + + Relies on the Irregular-Resampling Auto-Spectral Analysis (IRASA) method + (Wen & Liu 2016) to separate the aperiodic (1/f) and periodic components + (oscillations) in the PSD of EEG signals. + + + Parameters + ---------- + target_statistic : ndarray + The target dipole. + compute_psd : + ... + sfreq : + ... + + Returns + ------- + clean_psd : ndarray + Neural power spectra minus PSD of the aperiodic approximation. + + """ + + import yasa + + # get PSD of target statistic + freqs, psd = compute_psd(target_statistic, sfreq) + + # get aperiodic component + f, aperiodic_component, periodic_component = yasa.irasa(data=target_statistic, + sf=sfreq, + return_fit=False) + + clean_psd = psd - aperiodic_component + + return clean_psd From 2977aa158e7ac57306a5917f76885cf4666ad4fa Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 15 Jun 2023 18:24:57 +0200 Subject: [PATCH 11/63] Draft opt class and functions based on comments --- hnn_core/opt_toy_example/general.py | 122 +++++++++++++++++++++------- hnn_core/opt_toy_example/metrics.py | 25 +++--- 2 files changed, 104 insertions(+), 43 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 3e0868ab4..7a6eae766 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -33,16 +33,17 @@ class Optimizer: - def __init__(self, net, constraints, solver, obj_fun, f_bands=None, - weights=None): + def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, + f_bands=None, weights=None): self.net = net self.constraints = constraints + self._set_params = set_params # Optimizer method if solver == 'bayesian': - self._get_params = _get_params_bayesian + self._assemble_constraints = _assemble_constraints_bayesian self._run_opt = _run_opt_bayesian elif solver == 'cobyla': - self._get_params = _get_params_cobyla + self.__assemble_constraints = _assemble_constraints_cobyla self._run_opt = _run_opt_cobyla # Response to be optimized if obj_fun == 'evoked': @@ -53,6 +54,7 @@ def __init__(self, net, constraints, solver, obj_fun, f_bands=None, self.weights = weights elif obj_fun == 'poisson': self.obj_fun = _rmse_poisson + self.scaling = scaling self.net_ = None self.obj = list() self.opt_params = None @@ -80,6 +82,7 @@ def __repr__(self): class_name = self.__class__.__name__ return '<%s | %s>' % (class_name) +<<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic): @@ -90,6 +93,9 @@ def fit(self, target_statistic, sfreq): ======= def fit(self, target_statistic): >>>>>>> a4f67f6 (add optimize rhythmic function) +======= + def fit(self, target_statistic=None): +>>>>>>> e101c24 (Draft opt class and functions based on comments) """ ... 
Parameters @@ -130,6 +136,7 @@ def plot_param_search(): def _get_params_bayesian(net, constraints): """Assembles constraints & initial parameters as required by gp_minimize. +<<<<<<< HEAD ======= init_params, cons = self._get_params(self.net, self.constraints) @@ -139,9 +146,16 @@ def _get_params_bayesian(net, constraints): >>>>>>> 2f308f8 (added pep8 formatting) ======= params = self._get_params(self.net, self.constraints) +======= + params = self.__assemble_constraints(self._set_params, + self.constraints) + +>>>>>>> e101c24 (Draft opt class and functions based on comments) opt_params, obj, net_ = self._run_opt(self.net, params, + self._set_params, self.obj_fun, +<<<<<<< HEAD target_statistic, <<<<<<< HEAD self.max_iter) @@ -158,6 +172,14 @@ def _get_params_bayesian(net, constraints): self.f_bands, self.weights) >>>>>>> a4f67f6 (add optimize rhythmic function) +======= + self.scaling, + self.max_iter, + target_statistic, + self.f_bands, + self.weights) + +>>>>>>> e101c24 (Draft opt class and functions based on comments) self.opt_params = opt_params self.obj = obj self.net_ = net_ @@ -205,7 +227,7 @@ def plot_param_search(): return -def _get_params(net, constraints): +def _get_initial_params(net, constraints): """Gets parameters. Parameters @@ -228,49 +250,71 @@ def _get_params(net, constraints): (might have to go back to weights_ampa instead of ampa) """ - params = dict() + # get params to optimize + # param_names = dict() + # for drive_name in constraints: + # temp = list() + # for param_name in constraints[drive_name]: + # temp.append(param_name) + # param_names.update({drive_name: temp}) + # params.update({'names': param_names}) - # Get params to optimize + params = dict() param_names = dict() - for drive_name in constraints: - temp = list() - for param_name in constraints[drive_name]: - temp.append(param_name) - param_names.update({drive_name: temp}) - params.update({'names': param_names}) - - # Get initial params (bayesian & cobyla can use the same format) initial_params = list() cons = list() # get net drive names drive_names = [key for key in net.external_drives.keys()] - for drive_name in param_names: + for drive_name in constraints: # get relevant params if drive_name in drive_names: - for param_name in param_names[drive_name]: + + drive_param_names = list() # keys ***************** + + for param_name in constraints[drive_name]: if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', 'burst_std'): + + drive_param_names.append(param_name) # ***************** + initial_params.append(net.external_drives[drive_name] ['dynamics'][param_name]) cons.append(constraints[drive_name][param_name]) elif param_name in ('ampa', 'nmda'): + + drive_param_names.append(param_name) # ***************** + weight_target_names = list() # values for ampa/nmda keys ***************** + conn_idxs = pick_connection(net, src_gids=drive_name) for conn_idx in conn_idxs: + + target_type = net.connectivity[conn_idx]['target'] # ***************** + # L5_pyramidal, L2_basket, L5_basket, L2_pyramidal target_receptor = net.connectivity[conn_idx]\ ['receptor'] if target_receptor == param_name: + + weight_target_names.append(target_type) # ***************** + initial_params.append(net.connectivity[conn_idx] ['nc_dict']['A_weight']) cons.append(constraints[drive_name][param_name]) + + param_names.update({drive_name: dict.fromkeys(drive_param_names)}) + param_names[drive_name]['ampa'] = dict.fromkeys(weight_target_names) + + + # params.update({'names': param_names}) params.update({'initial': initial_params}) 
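# At this draft stage the dictionary assembled here carries the initial
# values and their bounds (the 'names' bookkeeping is still being reworked
# above). A hypothetical instance for one drive with two dynamics
# parameters and one AMPA weight; all values are examples only.
params = {
    'initial': [2.0, 0.5, 3e-4],                         # mu, sigma, ampa
    'constraints': [[0.01, 10.], [0.01, 5.], [0., 1.]],  # [min, max] each
}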
params.update({'constraints': cons}) return params +<<<<<<< HEAD def _get_params_bayesian(net, constraints): <<<<<<< HEAD <<<<<<< HEAD @@ -280,6 +324,9 @@ def _get_params_bayesian(net, constraints): ======= """Assembles constraints & initial parameters as required by gp_minimize. ======= +======= +def _assemble_constraints_bayesian(set_params, constraints): +>>>>>>> e101c24 (Draft opt class and functions based on comments) """Assembles constraints in format required by gp_minimize. >>>>>>> 672ce00 (Address comments for more generalized routine) @@ -495,8 +542,10 @@ def _obj_func(params_cobyla): """ + net = set_params + # get initial params - params = _get_params(net, constraints) + params = _get_initial_params(net, constraints) # assemble constraints in solver-specific format cons_bayesian = list() @@ -506,7 +555,7 @@ def _obj_func(params_cobyla): return params -def _get_params_cobyla(net, constraints): +def _assemble_constraints_cobyla(set_params, constraints): """Assembles constraints in format required by fmin_cobyla. Parameters @@ -523,8 +572,10 @@ def _get_params_cobyla(net, constraints): """ + net = set_params + # get initial params - params = _get_params(net, constraints) + params = _get_initial_params(net, constraints) # assemble constraints in solver-specific format cons_cobyla = list() @@ -537,8 +588,8 @@ def _get_params_cobyla(net, constraints): return params -def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, - f_bands, weights): +def _run_opt_bayesian(net, params, set_params, obj_fun, scaling, max_iter, + target_statistic, f_bands, weights): """Uses gp_minimize optimizer. Parameters @@ -566,6 +617,7 @@ def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter, def _obj_func(predicted_params): return obj_fun(net, params['names'], +<<<<<<< HEAD target_statistic, <<<<<<< HEAD predicted_params) @@ -580,6 +632,14 @@ def _obj_func(predicted_params): weights, _set_params) >>>>>>> a4f67f6 (add optimize rhythmic function) +======= + set_params, + predicted_params, + scaling, + target_statistic, + f_bands, + weights) +>>>>>>> e101c24 (Draft opt class and functions based on comments) opt_results = gp_minimize(func=_obj_func, dimensions=params['constraints'], @@ -590,12 +650,12 @@ def _obj_func(predicted_params): opt_params = opt_results.x obj = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] # get optimized net - net_ = _set_params(net, params['names'], opt_params) + net_ = set_params(net, params['names'], opt_params) return opt_params, obj, net_ -def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, f_bands, - weights): +def _run_opt_cobyla(net, params, set_params, obj_fun, scaling, max_iter, + target_statistic, f_bands, weights): """Uses fmin_cobyla optimizer. 
Parameters @@ -623,11 +683,12 @@ def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter, f_bands, def _obj_func(predicted_params): return obj_fun(net, params['names'], - target_statistic, + set_params, predicted_params, + scaling, + target_statistic, f_bands, - weights, - _set_params) + weights) opt_results = fmin_cobyla(_obj_func, <<<<<<< HEAD @@ -662,10 +723,11 @@ def _obj_func(predicted_params): ======= obj = list() # get optimized net - net_ = _set_params(net, params['names'], opt_params) + net_ = set_params(net, params['names'], opt_params) return opt_params, obj, net_ >>>>>>> 672ce00 (Address comments for more generalized routine) +<<<<<<< HEAD <<<<<<< HEAD ======= @@ -1108,3 +1170,5 @@ def _rmse_poisson(): >>>>>>> 2f308f8 (added pep8 formatting) ======= >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) +======= +>>>>>>> e101c24 (Draft opt class and functions based on comments) diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/opt_toy_example/metrics.py index 52930d7ff..a8e090ec8 100644 --- a/hnn_core/opt_toy_example/metrics.py +++ b/hnn_core/opt_toy_example/metrics.py @@ -1,10 +1,9 @@ from hnn_core import simulate_dipole import numpy as np -from mne.time_frequency import psd_array_multitaper -def _rmse_evoked(net, param_names, target_statistic, predicted_params, - f_bands, weights, _set_params): +def _rmse_evoked(net, param_names, set_params, predicted_params, scaling, + target_statistic, f_bands, weights): """The objective function for evoked responses. Parameters @@ -23,23 +22,21 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params, """ # get network with predicted params - new_net = _set_params(net, param_names, predicted_params) + new_net = set_params(net, param_names, predicted_params) # ??? # simulate dipole dpl = simulate_dipole(new_net, tstop=500, n_trials=1)[0] - # smooth & scale (scale must be passed in by user) - dpl.scale(1000) + # smooth & scale + dpl.scale(scaling) dpl.smooth(20) # calculate rmse - obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() - / len(dpl.times)) / (max(target_statistic) - - min(target_statistic)) + obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() / len(dpl.times)) / (max(target_statistic) - min(target_statistic)) return obj -def _rmse_rhythmic(net, param_names, target_statistic, predicted_params, - f_bands, weights, _set_params): +def _rmse_rhythmic(net, param_names, set_params, predicted_params, scaling, + target_statistic, f_bands, weights): """The objective function for evoked responses. Parameters @@ -62,11 +59,11 @@ def _rmse_rhythmic(net, param_names, target_statistic, predicted_params, from scipy.signal import periodogram # simulate dpl with predicted params - new_net = _set_params(net, param_names, predicted_params) + new_net = set_params(net, param_names, predicted_params) # ??? 
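# For context, the scaling and smoothing applied to each simulated dipole
# before scoring rely on hnn-core's in-place Dipole methods; a short sketch
# (the scale factor and window length are user-supplied examples).
from hnn_core import jones_2009_model, simulate_dipole

demo_net = jones_2009_model(add_drives_from_params=True)
demo_dpl = simulate_dipole(demo_net, tstop=100., n_trials=1)[0]
demo_dpl.scale(3000)   # unit conversion supplied by the user
demo_dpl.smooth(20.)   # Hamming-window smoothing, window length in ms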
dpl = simulate_dipole(new_net, tstop=300, n_trials=1)[0] - # scale (scale must be passed in by user) - dpl.scale(1000) + # scale + dpl.scale(scaling) # get psd of simulated dpl freqs_simulated, psd_simulated = periodogram(dpl.data['agg'], dpl.sfreq, From d8b9cb822f8cd58e9fb5eef8547237a511344e80 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Fri, 16 Jun 2023 17:05:18 +0200 Subject: [PATCH 12/63] added pep8 formatting --- hnn_core/opt_toy_example/general.py | 32 ++++++++--------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 7a6eae766..14d2b93db 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -61,6 +61,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic, window_len=None): @@ -77,6 +78,9 @@ def fit(self, target_statistic, window_len=None): ======= self.max_iter = 150 >>>>>>> a4f67f6 (add optimize rhythmic function) +======= + self.max_iter = 200 +>>>>>>> f3490e1 (added pep8 formatting) def __repr__(self): class_name = self.__class__.__name__ @@ -100,11 +104,8 @@ def fit(self, target_statistic=None): Parameters ---------- - target_statistic : ndarray - Recorded dipole (must have the same amount of data points as - the initial, simulated dipole). - scaling_factor : float - ... + target_statistic : ndarray | None + Recorded dipole. """ <<<<<<< HEAD <<<<<<< HEAD @@ -223,6 +224,7 @@ def plot_convergence(self, ax=None, show=True): fig.show(show) return axis.get_figure() + def plot_param_search(): return @@ -242,22 +244,8 @@ def _get_initial_params(net, constraints): ------- params : dictionary params['initial'] : list - params['constraints'] : list of tuples (min, max) + params['constraints'] : list of tuples (min, max)""" - params_to_optim : dictionary - {drive_name: ['param_name', 'param_name', 'param_name']} - might use this later to override net params in _set_params - (might have to go back to weights_ampa instead of ampa) - """ - - # get params to optimize - # param_names = dict() - # for drive_name in constraints: - # temp = list() - # for param_name in constraints[drive_name]: - # temp.append(param_name) - # param_names.update({drive_name: temp}) - # params.update({'names': param_names}) params = dict() param_names = dict() @@ -539,7 +527,6 @@ def _obj_func(params_cobyla): ------- params : dictionary Contains parameter names, initial parameters, and constraints. - """ net = set_params @@ -548,7 +535,7 @@ def _obj_func(params_cobyla): params = _get_initial_params(net, constraints) # assemble constraints in solver-specific format - cons_bayesian = list() + cons_bayesian = list() for cons in params['constraints']: cons_bayesian.append((cons[0], cons[1])) params.update({'constraints': cons_bayesian}) @@ -569,7 +556,6 @@ def _assemble_constraints_cobyla(set_params, constraints): ------- params : dictionary Contains parameter names, initial parameters, and constraints. 
- """ net = set_params From 726bfd8fcd38b5fc78b5d6e3adc3a46f40974a68 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 22 Jun 2023 13:42:40 +0200 Subject: [PATCH 13/63] Address comments for more generalized routine --- hnn_core/opt_toy_example/general.py | 35 ++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 14d2b93db..b31428c86 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -62,6 +62,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic, window_len=None): @@ -81,6 +82,9 @@ def fit(self, target_statistic, window_len=None): ======= self.max_iter = 200 >>>>>>> f3490e1 (added pep8 formatting) +======= + self.max_iter = 150 +>>>>>>> 9131299 (Address comments for more generalized routine) def __repr__(self): class_name = self.__class__.__name__ @@ -104,8 +108,11 @@ def fit(self, target_statistic=None): Parameters ---------- - target_statistic : ndarray | None - Recorded dipole. + target_statistic : ndarray + Recorded dipole (must have the same amount of data points as + the initial, simulated dipole). + scaling_factor : float + ... """ <<<<<<< HEAD <<<<<<< HEAD @@ -224,7 +231,6 @@ def plot_convergence(self, ax=None, show=True): fig.show(show) return axis.get_figure() - def plot_param_search(): return @@ -244,8 +250,22 @@ def _get_initial_params(net, constraints): ------- params : dictionary params['initial'] : list - params['constraints'] : list of tuples (min, max)""" + params['constraints'] : list of tuples (min, max) + params_to_optim : dictionary + {drive_name: ['param_name', 'param_name', 'param_name']} + might use this later to override net params in _set_params + (might have to go back to weights_ampa instead of ampa) + """ + + # get params to optimize + # param_names = dict() + # for drive_name in constraints: + # temp = list() + # for param_name in constraints[drive_name]: + # temp.append(param_name) + # param_names.update({drive_name: temp}) + # params.update({'names': param_names}) params = dict() param_names = dict() @@ -527,6 +547,7 @@ def _obj_func(params_cobyla): ------- params : dictionary Contains parameter names, initial parameters, and constraints. + """ net = set_params @@ -535,7 +556,7 @@ def _obj_func(params_cobyla): params = _get_initial_params(net, constraints) # assemble constraints in solver-specific format - cons_bayesian = list() + cons_bayesian = list() for cons in params['constraints']: cons_bayesian.append((cons[0], cons[1])) params.update({'constraints': cons_bayesian}) @@ -556,6 +577,7 @@ def _assemble_constraints_cobyla(set_params, constraints): ------- params : dictionary Contains parameter names, initial parameters, and constraints. 
+ """ net = set_params @@ -711,6 +733,7 @@ def _obj_func(predicted_params): # get optimized net net_ = set_params(net, params['names'], opt_params) return opt_params, obj, net_ +<<<<<<< HEAD >>>>>>> 672ce00 (Address comments for more generalized routine) <<<<<<< HEAD @@ -1158,3 +1181,5 @@ def _rmse_poisson(): >>>>>>> 18ac228 (added methods to remove 1/f and compute psd) ======= >>>>>>> e101c24 (Draft opt class and functions based on comments) +======= +>>>>>>> 9131299 (Address comments for more generalized routine) From 9530dd7550e9fed6610c97ecd507135b2e1f02db Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 27 Jul 2023 06:42:33 -0400 Subject: [PATCH 14/63] Clean up optimize evoked and example --- hnn_core/opt_toy_example/general.py | 415 +++++++++++--------- hnn_core/opt_toy_example/metrics.py | 119 +++--- hnn_core/opt_toy_example/optimize_evoked.py | 162 ++++++++ 3 files changed, 443 insertions(+), 253 deletions(-) create mode 100644 hnn_core/opt_toy_example/optimize_evoked.py diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index b31428c86..9be50825a 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -2,6 +2,7 @@ <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= <<<<<<< HEAD @@ -12,6 +13,10 @@ from hnn_core.network import pick_connection from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson +======= + +from metrics import _rmse_evoked +>>>>>>> 50186cb (Clean up optimize evoked and example) from skopt import gp_minimize from scipy.optimize import fmin_cobyla @@ -33,8 +38,8 @@ class Optimizer: - def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, - f_bands=None, weights=None): + def __init__(self, net, constraints, set_params, solver, obj_fun, + scale_factor, smooth_window_len, tstop): self.net = net self.constraints = constraints self._set_params = set_params @@ -43,18 +48,14 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, self._assemble_constraints = _assemble_constraints_bayesian self._run_opt = _run_opt_bayesian elif solver == 'cobyla': - self.__assemble_constraints = _assemble_constraints_cobyla + self._assemble_constraints = _assemble_constraints_cobyla self._run_opt = _run_opt_cobyla # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked - elif obj_fun == 'rhythmic': - self.obj_fun = _rmse_rhythmic - self.f_bands = f_bands - self.weights = weights - elif obj_fun == 'poisson': - self.obj_fun = _rmse_poisson - self.scaling = scaling + self.scale_factor = scale_factor + self.smooth_window_len = smooth_window_len + self.tstop = tstop self.net_ = None self.obj = list() self.opt_params = None @@ -63,6 +64,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, scaling, <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic, window_len=None): @@ -85,13 +87,17 @@ def fit(self, target_statistic, window_len=None): ======= self.max_iter = 150 >>>>>>> 9131299 (Address comments for more generalized routine) +======= + self.max_iter = 200 +>>>>>>> 50186cb (Clean up optimize evoked and example) def __repr__(self): class_name = self.__class__.__name__ - return '<%s | %s>' % (class_name) + return class_name <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic): >>>>>>> 672ce00 (Address comments for more generalized routine) @@ -157,12 +163,33 @@ def _get_params_bayesian(net, constraints): ======= params = 
self.__assemble_constraints(self._set_params, self.constraints) +======= + def fit(self, target_statistic): + """ + Runs optimization routine. + + Parameters + ---------- + target_statistic : ndarray + The recorded dipole. + + Returns + ------- + None. + + """ + + constraints = self._assemble_constraints(self.constraints) + initial_params = _get_initial_params(self.constraints) +>>>>>>> 50186cb (Clean up optimize evoked and example) >>>>>>> e101c24 (Draft opt class and functions based on comments) opt_params, obj, net_ = self._run_opt(self.net, - params, + constraints, + initial_params, self._set_params, self.obj_fun, +<<<<<<< HEAD <<<<<<< HEAD target_statistic, <<<<<<< HEAD @@ -182,10 +209,13 @@ def _get_params_bayesian(net, constraints): >>>>>>> a4f67f6 (add optimize rhythmic function) ======= self.scaling, +======= + self.scale_factor, + self.smooth_window_len, + self.tstop, +>>>>>>> 50186cb (Clean up optimize evoked and example) self.max_iter, - target_statistic, - self.f_bands, - self.weights) + target_statistic) >>>>>>> e101c24 (Draft opt class and functions based on comments) self.opt_params = opt_params @@ -194,18 +224,19 @@ def _get_params_bayesian(net, constraints): return def plot_convergence(self, ax=None, show=True): - """Convergence plot. + """ + Convergence plot. Parameters ---------- - ax : instance of matplotlib figure | None - The matplotlib axis. + ax : instance of matplotlib figure, None + The matplotlib axis. The default is None. show : bool - If True, show the figure. + If True, show the figure. The default is True. Returns ------- - fig : instance of plt.fig + instance of plt.fig The matplotlib figure handle. """ @@ -231,97 +262,77 @@ def plot_convergence(self, ax=None, show=True): fig.show(show) return axis.get_figure() - def plot_param_search(): - return - -def _get_initial_params(net, constraints): - """Gets parameters. +def _get_initial_params(constraints): + """ + Gets initial parameters as midpoints of parameter ranges. Parameters ---------- - net : Network - constraints : dictionary - {'drive_name': {'param_name': [min, max], - 'param_name': [min, max], - 'param_name': [min, max]}} + constraints : dict + The user-defined constraints. Returns ------- - params : dictionary - params['initial'] : list - params['constraints'] : list of tuples (min, max) - - params_to_optim : dictionary - {drive_name: ['param_name', 'param_name', 'param_name']} - might use this later to override net params in _set_params - (might have to go back to weights_ampa instead of ampa) + initial_params : dict + Keys are parameter names, values are initial parameters. + """ - # get params to optimize - # param_names = dict() - # for drive_name in constraints: - # temp = list() - # for param_name in constraints[drive_name]: - # temp.append(param_name) - # param_names.update({drive_name: temp}) - # params.update({'names': param_names}) - - params = dict() - param_names = dict() - initial_params = list() - cons = list() + initial_params = dict() + for cons_key in constraints: + initial_params.update({cons_key: + (constraints[cons_key][0] + + constraints[cons_key][1])/2}) - # get net drive names - drive_names = [key for key in net.external_drives.keys()] + return initial_params - for drive_name in constraints: - # get relevant params - if drive_name in drive_names: - drive_param_names = list() # keys ***************** +def _assemble_constraints_bayesian(constraints): + """ + Assembles constraints in format required by gp_minimize. 
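Editor's sketch, not part of the patch: `_get_initial_params` above seeds the search at the midpoint of each constraint range. For a hypothetical two-parameter constraints dict:

# Sketch only: midpoint seeding, as implemented by _get_initial_params.
constraints = {'evprox1_mu': (5., 50.), 'evprox1_sigma': (2., 25.)}
initial_params = {name: (lo + hi) / 2
                  for name, (lo, hi) in constraints.items()}
print(initial_params)  # {'evprox1_mu': 27.5, 'evprox1_sigma': 13.5}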
- for param_name in constraints[drive_name]: - if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', - 'burst_std'): + Parameters + ---------- + constraints : dict + The user-defined constraints. - drive_param_names.append(param_name) # ***************** + Returns + ------- + cons_bayesian : list of tuples + Lower and higher limit for each parameter. + """ - initial_params.append(net.external_drives[drive_name] - ['dynamics'][param_name]) - cons.append(constraints[drive_name][param_name]) - elif param_name in ('ampa', 'nmda'): + # assemble constraints in solver-specific format + cons_bayesian = list(constraints.values()) + return cons_bayesian - drive_param_names.append(param_name) # ***************** - weight_target_names = list() # values for ampa/nmda keys ***************** - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: +def _assemble_constraints_cobyla(constraints): + """ + Assembles constraints in format required by fmin_cobyla. - target_type = net.connectivity[conn_idx]['target'] # ***************** + Parameters + ---------- + constraints : dict + The user-defined constraints. - # L5_pyramidal, L2_basket, L5_basket, L2_pyramidal - target_receptor = net.connectivity[conn_idx]\ - ['receptor'] - if target_receptor == param_name: - - weight_target_names.append(target_type) # ***************** + Returns + ------- + cons_bayesian : dict + ... + """ - initial_params.append(net.connectivity[conn_idx] - ['nc_dict']['A_weight']) - cons.append(constraints[drive_name][param_name]) - - param_names.update({drive_name: dict.fromkeys(drive_param_names)}) - param_names[drive_name]['ampa'] = dict.fromkeys(weight_target_names) - - + # assemble constraints in solver-specific format + cons_cobyla = list() + for idx, cons_key in enumerate(constraints): + cons_cobyla.append(lambda x: float(constraints[cons_key][1]) - x[idx]) + cons_cobyla.append(lambda x: x[idx] - float(constraints[cons_key][0])) - # params.update({'names': param_names}) - params.update({'initial': initial_params}) - params.update({'constraints': cons}) - return params + return cons_cobyla +<<<<<<< HEAD <<<<<<< HEAD def _get_params_bayesian(net, constraints): <<<<<<< HEAD @@ -337,10 +348,16 @@ def _assemble_constraints_bayesian(set_params, constraints): >>>>>>> e101c24 (Draft opt class and functions based on comments) """Assembles constraints in format required by gp_minimize. >>>>>>> 672ce00 (Address comments for more generalized routine) +======= +def _update_params(initial_params, predicted_params): + """ + Update param_dict with predicted parameters. +>>>>>>> 50186cb (Clean up optimize evoked and example) >>>>>>> 2f308f8 (added pep8 formatting) Parameters ---------- +<<<<<<< HEAD <<<<<<< HEAD net : the Network object constraints : the user-defined constraints @@ -542,88 +559,70 @@ def _obj_func(params_cobyla): The network object. constraints : list of lists Constraints for each parameter to be optimized ([min, max]). +======= + initial_params : dict + Keys are parameter names, values are initial parameters. + predicted_params : list + Parameters selected by the optimizer. +>>>>>>> 50186cb (Clean up optimize evoked and example) Returns ------- - params : dictionary - Contains parameter names, initial parameters, and constraints. - + params_dict : dict + Keys are parameter names, values are parameters. 
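Editor's note: `fmin_cobyla` expects each constraint as a callable that is non-negative when the constraint is satisfied. Lambdas created in a loop capture `idx` and `cons_key` by reference, so they should be frozen as default arguments (a later patch in this series does so for `idx`). A self-contained sketch of the safe pattern, with hypothetical bounds:

# Sketch only: bound constraints in fmin_cobyla's form, g(x) >= 0,
# with loop variables frozen via default arguments.
constraints = {'mu': (2., 8.), 'sigma': (1., 5.)}
cons_cobyla = []
for idx, (lo, hi) in enumerate(constraints.values()):
    cons_cobyla.append(lambda x, hi=hi, idx=idx: hi - x[idx])  # x[idx] <= hi
    cons_cobyla.append(lambda x, lo=lo, idx=idx: x[idx] - lo)  # x[idx] >= lo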
""" - net = set_params + param_dict = dict() + for param_key, param_name in enumerate(initial_params): + param_dict.update({param_name: predicted_params[param_key]}) - # get initial params - params = _get_initial_params(net, constraints) - - # assemble constraints in solver-specific format - cons_bayesian = list() - for cons in params['constraints']: - cons_bayesian.append((cons[0], cons[1])) - params.update({'constraints': cons_bayesian}) - return params + return param_dict -def _assemble_constraints_cobyla(set_params, constraints): - """Assembles constraints in format required by fmin_cobyla. +def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun, + scale_factor, smooth_window_len, tstop, max_iter, + target_statistic): + """ + Runs optimization routine with gp_minimize optimizer. Parameters ---------- net : Network The network object. - constraints : list of lists - Constraints for each parameter to be optimized ([min, max]). + constraints : list of tuples + Parameter constraints in solver-specific format. + initial_params : dict + Keys are parameter names, values are initial parameters.. + set_params : func + User-defined function that sets parameters in network drives. + obj_fun : func + The objective function. + scale_factor : float + The dipole scale factor. + smooth_window_len : float + The smooth window length. + tstop : float + The simulated dipole's duration. + max_iter : int + Number of calls the optimizer makes. + target_statistic : ndarray + The recorded dipole. Returns ------- - params : dictionary - Contains parameter names, initial parameters, and constraints. - + opt_params : list + Optimized parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. """ - net = set_params - - # get initial params - params = _get_initial_params(net, constraints) - - # assemble constraints in solver-specific format - cons_cobyla = list() - for cons_idx, cons_val in enumerate(params['constraints']): - cons_cobyla.append(lambda x: - params['constraints'][cons_idx][1] - x[cons_idx]) - cons_cobyla.append(lambda x: - x[cons_idx] - params['constraints'][cons_idx][0]) - params.update({'constraints': cons_cobyla}) - return params - - -def _run_opt_bayesian(net, params, set_params, obj_fun, scaling, max_iter, - target_statistic, f_bands, weights): - """Uses gp_minimize optimizer. - - Parameters - ---------- - net : Network - params : dictionary - Contains parameter names, initial parameters, and constraints. - obj_fun : func - The objective function. - target_statistic : ndarray - The recorded dipole. - max_iter : int - Max number of calls. - - Returns - ------- - opt_params : list - Final parameters. - obj : list - Objective values. - net_ : Network - Optimized network object. 
- """ + obj_values = list() def _obj_func(predicted_params): return obj_fun(net, +<<<<<<< HEAD params['names'], <<<<<<< HEAD target_statistic, @@ -648,58 +647,93 @@ def _obj_func(predicted_params): f_bands, weights) >>>>>>> e101c24 (Draft opt class and functions based on comments) +======= + initial_params, + set_params, + predicted_params, + _update_params, + obj_values, + scale_factor, + smooth_window_len, + tstop, + target_statistic) +>>>>>>> 50186cb (Clean up optimize evoked and example) opt_results = gp_minimize(func=_obj_func, - dimensions=params['constraints'], + dimensions=constraints, acq_func='EI', n_calls=max_iter, - x0=params['initial'], - random_state=64) + x0=list(initial_params.values())) + + # get optimized params opt_params = opt_results.x - obj = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + + # get objective values + obj = [np.min(obj_values[:i]) for i in range(1, max_iter + 1)] + # get optimized net - net_ = set_params(net, params['names'], opt_params) + param_dict = _update_params(initial_params, opt_params) + net_ = set_params(net.copy(), param_dict) + return opt_params, obj, net_ -def _run_opt_cobyla(net, params, set_params, obj_fun, scaling, max_iter, - target_statistic, f_bands, weights): - """Uses fmin_cobyla optimizer. +def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun, + scale_factor, smooth_window_len, tstop, max_iter, + target_statistic): + """ + Runs optimization routine with fmin_cobyla optimizer. - Parameters - ---------- - net : Network - params : dictionary - Contains parameter names, initial parameters, and constraints. - obj_fun : func - The objective function. - target_statistic : ndarray - The recorded dipole. - max_iter : int - Max number of calls. + Parameters + ---------- + net : Network + The network object. + constraints : dict + Parameter constraints in solver-specific format. + initial_params : dict + Keys are parameter names, values are initial parameters.. + set_params : func + User-defined function that sets parameters in network drives. + obj_fun : func + The objective function. + scale_factor : float + The dipole scale factor. + smooth_window_len : float + The smooth window length. + tstop : float + The simulated dipole's duration. + max_iter : int + Number of calls the optimizer makes. + target_statistic : ndarray, None + The recorded dipole. The default is None. - Returns - ------- - opt_params : list - Final parameters. - obj : list - Objective values. - net_ : Network - Optimized network object. + Returns + ------- + opt_params : list + Optimized parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. 
""" + obj_values = list() + def _obj_func(predicted_params): return obj_fun(net, - params['names'], + initial_params, set_params, predicted_params, - scaling, - target_statistic, - f_bands, - weights) + _update_params, + obj_values, + scale_factor, + smooth_window_len, + tstop, + target_statistic) opt_results = fmin_cobyla(_obj_func, <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD cons = cons, rhobeg = 0.1, @@ -713,11 +747,15 @@ def _obj_func(predicted_params): ======= cons=params['constraints'], >>>>>>> 672ce00 (Address comments for more generalized routine) +======= + cons=constraints, +>>>>>>> 50186cb (Clean up optimize evoked and example) rhobeg=0.1, rhoend=1e-4, - x0=params['initial'], + x0=list(initial_params.values()), maxfun=max_iter, catol=0.0) +<<<<<<< HEAD >>>>>>> 2f308f8 (added pep8 formatting) opt_params = opt_results <<<<<<< HEAD @@ -730,8 +768,19 @@ def _obj_func(predicted_params): ======= obj = list() +======= + + # get optimized params + opt_params = opt_results + + # get objective values + obj = [np.min(obj_values[:i]) for i in range(1, max_iter + 1)] + +>>>>>>> 50186cb (Clean up optimize evoked and example) # get optimized net - net_ = set_params(net, params['names'], opt_params) + param_dict = _update_params(initial_params, opt_params) + net_ = set_params(net.copy(), param_dict) + return opt_params, obj, net_ <<<<<<< HEAD >>>>>>> 672ce00 (Address comments for more generalized routine) diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/opt_toy_example/metrics.py index a8e090ec8..44a69e102 100644 --- a/hnn_core/opt_toy_example/metrics.py +++ b/hnn_core/opt_toy_example/metrics.py @@ -1,86 +1,65 @@ -from hnn_core import simulate_dipole import numpy as np +from hnn_core import simulate_dipole, MPIBackend -def _rmse_evoked(net, param_names, set_params, predicted_params, scaling, - target_statistic, f_bands, weights): - """The objective function for evoked responses. +from scipy.signal import resample - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : ndarray - The recorded dipole. - predicted_params : list - Parameters selected by the optimizer. - Returns - ------- - rmse : normalized RMSE between recorded and simulated dipole +def _rmse_evoked(net, initial_params, set_params, predicted_params, + update_params, obj_values, scale_factor, smooth_window_len, + tstop, target_statistic): """ - - # get network with predicted params - new_net = set_params(net, param_names, predicted_params) # ??? - # simulate dipole - dpl = simulate_dipole(new_net, tstop=500, n_trials=1)[0] - - # smooth & scale - dpl.scale(scaling) - dpl.smooth(20) - - # calculate rmse - obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() / len(dpl.times)) / (max(target_statistic) - min(target_statistic)) - return obj - - -def _rmse_rhythmic(net, param_names, set_params, predicted_params, scaling, - target_statistic, f_bands, weights): - """The objective function for evoked responses. - - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : ndarray - Recorded dipole. - predicted_params : list - Parameters selected by the optimizer. - compute_psd : - ... - - Returns - ------- - rmse : normalized RMSE between recorded and simulated dipole + The objective function for evoked responses. + + Parameters + ---------- + net : Network + The network object. + initial_params : dict + Keys are parameter names, values are initial parameters. 
+ set_params : func + User-defined function that sets network drives and parameters. + predicted_params : list + Parameters selected by the optimizer. + update_params : func + Function to update param_dict. + scale_factor : float + The dipole scale factor. + smooth_window_len : float + The smooth window length. + tstop : float + The simulated dipole's duration. + target_statistic : ndarray + The recorded dipole. + + Returns + ------- + obj : float + Normalized RMSE between recorded and simulated dipole. """ - from scipy.signal import periodogram + param_dict = update_params(initial_params, predicted_params) # simulate dpl with predicted params - new_net = set_params(net, param_names, predicted_params) # ??? - dpl = simulate_dipole(new_net, tstop=300, n_trials=1)[0] + new_net = set_params(net.copy(), param_dict) + with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): + dpl = simulate_dipole(new_net, tstop=tstop, n_trials=1)[0] - # scale - dpl.scale(scaling) + # downsample if necessary + if (len(dpl.data['agg']) < len(target_statistic)): + target_statistic = resample(target_statistic, len(dpl.data['agg'])) + elif (len(dpl.data['agg']) > len(target_statistic)): + dpl.data['agg'] = resample(dpl.data['agg'], len(target_statistic)) - # get psd of simulated dpl - freqs_simulated, psd_simulated = periodogram(dpl.data['agg'], dpl.sfreq, - window='hamming') + # smooth & scale + dpl.scale(scale_factor) + dpl.smooth(smooth_window_len) - # for each f band - f_bands_psds = list() - for f_band_idx, f_band_val in enumerate(f_bands): - f_band_psd_idx = np.where(np.logical_and(freqs_simulated >= f_band_val[0], - freqs_simulated <= f_band_val[1]))[0] - f_bands_psds.append((-weights[f_band_idx] * sum(psd_simulated[f_band_psd_idx])) / sum(psd_simulated)) + # calculate rmse + obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() / + len(dpl.times)) / (max(target_statistic) - + min(target_statistic)) - # grand sum - obj = sum(f_bands_psds) + obj_values.append(obj) return obj - - -def _rmse_poisson(): - return \ No newline at end of file diff --git a/hnn_core/opt_toy_example/optimize_evoked.py b/hnn_core/opt_toy_example/optimize_evoked.py new file mode 100644 index 000000000..45e662f9d --- /dev/null +++ b/hnn_core/opt_toy_example/optimize_evoked.py @@ -0,0 +1,162 @@ +""" +================================================= +05. Optimize simulated evoked response parameters +================================================= + +This example demonstrates how to optimize the parameters +of the model simulation to match an experimental dipole waveform. +""" + +# Authors: + +import os.path as op + +import matplotlib.pyplot as plt + +############################################################################### +# Let us import hnn_core + +import hnn_core +from hnn_core import (MPIBackend, jones_2009_model, simulate_dipole, + read_dipole) + +hnn_core_root = op.join(op.dirname(hnn_core.__file__)) + +# The number of cores may need modifying depending on your current machine. +n_procs = 2 + +############################################################################### +# First, we will load experimental data into Dipole object. +# +# This is a different experiment than the one to which the base parameters were +# tuned. So, the initial RMSE will be large, giving the optimization procedure +# a lot to work with. 
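Editor's sketch, not part of the patch: the `_rmse_evoked` objective above normalizes the RMSE by the target's peak-to-peak amplitude, making the objective insensitive to the absolute scale of the recording. A worked toy example (values are illustrative):

import numpy as np

target = np.sin(np.linspace(0., np.pi, 100))  # peak-to-peak = 1
simulated = target + 0.05                     # constant 0.05 offset
nrmse = (np.sqrt(((simulated - target) ** 2).sum() / len(target))
         / (target.max() - target.min()))
print(nrmse)  # 0.05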
+
+from urllib.request import urlretrieve
+
+data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/master/'
+            'data/MEG_detection_data/S1_SupraT.txt')
+urlretrieve(data_url, 'S1_SupraT.txt')
+exp_dpl = read_dipole('S1_SupraT.txt')
+
+###############################################################################
+# Now we start the optimization!
+#
+# First, we define a function that will tell the optimization routine how to
+# modify the network drive parameters. The function will take in the Network
+# object with no attached drives, and a dictionary of the parameters we wish
+# to optimize.
+
+
+def set_params(net, params_dict):
+
+    # Proximal 1
+    weights_ampa_p1 = {'L2_basket': params_dict['evprox1_ampa_L2_basket'],
+                       'L2_pyramidal': params_dict['evprox1_ampa_L2_pyramidal'],
+                       'L5_basket': params_dict['evprox1_ampa_L5_basket'],
+                       'L5_pyramidal': params_dict['evprox1_ampa_L5_pyramidal']}
+    synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
+                         'L5_basket': 1., 'L5_pyramidal': 1.}
+
+    net.add_evoked_drive('evprox1',
+                         mu=params_dict['evprox1_mu'],
+                         sigma=params_dict['evprox1_sigma'],
+                         numspikes=1,
+                         location='proximal',
+                         weights_ampa=weights_ampa_p1,
+                         synaptic_delays=synaptic_delays_p)
+
+    # Distal
+    weights_ampa_d1 = {'L2_basket': params_dict['evdist1_ampa_L2_basket'],
+                       'L2_pyramidal': params_dict['evdist1_ampa_L2_pyramidal'],
+                       'L5_pyramidal': params_dict['evdist1_ampa_L5_pyramidal']}
+    weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317,
+                       'L5_pyramidal': 0.080074}
+    synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
+                          'L5_pyramidal': 0.1}
+
+    net.add_evoked_drive('evdist1',
+                         mu=params_dict['evdist1_mu'],
+                         sigma=params_dict['evdist1_sigma'],
+                         numspikes=1,
+                         location='distal',
+                         weights_ampa=weights_ampa_d1,
+                         weights_nmda=weights_nmda_d1,
+                         synaptic_delays=synaptic_delays_d1)
+
+    # Proximal 2
+    weights_ampa_p2 = {'L2_basket': params_dict['evprox2_ampa_L2_basket'],
+                       'L2_pyramidal': params_dict['evprox2_ampa_L2_pyramidal'],
+                       'L5_basket': params_dict['evprox2_ampa_L5_basket'],
+                       'L5_pyramidal': params_dict['evprox2_ampa_L5_pyramidal']}
+
+    net.add_evoked_drive('evprox2',
+                         mu=params_dict['evprox2_mu'],
+                         sigma=params_dict['evprox2_sigma'],
+                         numspikes=1,
+                         location='proximal',
+                         weights_ampa=weights_ampa_p2,
+                         synaptic_delays=synaptic_delays_p)
+
+    return net
+
+###############################################################################
+# Then, we define the constraints.
+#
+# The constraints must be a dictionary of tuples where the first value in each
+# tuple is the lower bound and the second value is the upper bound for the
+# corresponding parameter.
+
+
+constraints = dict()
+constraints.update({'evprox1_ampa_L2_basket': (0.01, 1.),
+                    'evprox1_ampa_L2_pyramidal': (0.01, 1.),
+                    'evprox1_ampa_L5_basket': (0.01, 1.),
+                    'evprox1_ampa_L5_pyramidal': (0.01, 1.),
+                    'evprox1_mu': (5., 50.),
+                    'evprox1_sigma': (2., 25.),
+                    'evdist1_ampa_L2_basket': (0.01, 1.),
+                    'evdist1_ampa_L2_pyramidal': (0.01, 1.),
+                    'evdist1_ampa_L5_pyramidal': (0.01, 1.),
+                    'evdist1_mu': (50., 80.),
+                    'evdist1_sigma': (2., 25.),
+                    'evprox2_ampa_L2_basket': (0.01, 1.),
+                    'evprox2_ampa_L2_pyramidal': (0.01, 1.),
+                    'evprox2_ampa_L5_basket': (0.01, 1.),
+                    'evprox2_ampa_L5_pyramidal': (0.01, 1.),
+                    'evprox2_mu': (125., 150.),
+                    'evprox2_sigma': (10., 60.)})
+
+
+###############################################################################
+# Now we define and fit the optimizer.
+ +from general import Optimizer + +tstop = exp_dpl.times[-1] +scale_factor = 3000 +smooth_window_len = 2 + +net = jones_2009_model() +optim = Optimizer(net, constraints, set_params, solver='cobyla', + obj_fun='evoked', scale_factor=scale_factor, + smooth_window_len=smooth_window_len, tstop=tstop) +optim.fit(exp_dpl.data['agg']) + +############################################################################### +# Finally, we can plot the experimental data alongside the post-optimization +# simulation dipole as well as the convergence plot. + +with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): + opt_dpl = simulate_dipole(optim.net_, tstop=tstop, n_trials=1)[0] +opt_dpl.scale(scale_factor) +opt_dpl.smooth(smooth_window_len) + +fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6)) + +exp_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:blue') +opt_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:green') +axes[0].legend(['experimental', 'optimized']) +optim.net_.cell_response.plot_spikes_hist(ax=axes[1]) + +fig1 = optim.plot_convergence() From 782326467226f7cf3856389f51e28e1aac0e86ad Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 22 Jun 2023 13:42:40 +0200 Subject: [PATCH 15/63] Address comments for more generalized routine --- hnn_core/opt_toy_example/general.py | 457 ++++++++++++++++++++++------ 1 file changed, 362 insertions(+), 95 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 9be50825a..e47a63080 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -3,6 +3,7 @@ <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= <<<<<<< HEAD @@ -27,19 +28,25 @@ from hnn_core import read_params, jones_2009_model, simulate_dipole ======= +======= + +>>>>>>> 1a7e98b (Address comments for more generalized routine) from hnn_core import simulate_dipole >>>>>>> 2f308f8 (added pep8 formatting) from hnn_core.network import pick_connection + from skopt import gp_minimize -from skopt.utils import use_named_args -from skopt.space import Real from scipy.optimize import fmin_cobyla >>>>>>> df86a5d (Draft opt class and functions based on comments) class Optimizer: +<<<<<<< HEAD def __init__(self, net, constraints, set_params, solver, obj_fun, scale_factor, smooth_window_len, tstop): +======= + def __init__(self, net, constraints, solver, obj_fun): +>>>>>>> 1a7e98b (Address comments for more generalized routine) self.net = net self.constraints = constraints self._set_params = set_params @@ -50,6 +57,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, elif solver == 'cobyla': self._assemble_constraints = _assemble_constraints_cobyla self._run_opt = _run_opt_cobyla +<<<<<<< HEAD # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked @@ -110,21 +118,47 @@ def fit(self, target_statistic): ======= def fit(self, target_statistic=None): >>>>>>> e101c24 (Draft opt class and functions based on comments) +======= + if obj_fun == 'evoked': + self.obj_fun = _rmse_evoked + elif obj_fun == 'rhythmic': + self.obj_fun = _rmse_rhythmic + elif obj_fun == 'poisson': + self.obj_fun = _rmse_poisson + self.net_ = None + self.obj = list() + self.opt_params = None + self.max_iter = 200 + + def __repr__(self): + class_name = self.__class__.__name__ + return '<%s | %s>' % (class_name) + + def fit(self, target_statistic): +>>>>>>> 1a7e98b (Address comments for more generalized routine) """ ... 
Parameters ---------- +<<<<<<< HEAD target_statistic : ndarray Recorded dipole (must have the same amount of data points as the initial, simulated dipole). scaling_factor : float ... +======= + target_statistic : + dpl + window_len : float + for smoothing dpl +>>>>>>> 1a7e98b (Address comments for more generalized routine) """ <<<<<<< HEAD <<<<<<< HEAD ======= >>>>>>> 2f308f8 (added pep8 formatting) +<<<<<<< HEAD <<<<<<< HEAD init_params, cons = self._get_params(self.net, self.constraints) opt_params, error_values = self._run_opt(self.net, @@ -134,21 +168,127 @@ def fit(self, target_statistic=None): target_statistic, window_len) <<<<<<< HEAD +======= + params = self._get_params(self.net, self.constraints) + opt_params, obj, net_ = self._run_opt(self.net, + params, + self.obj_fun, + target_statistic, + self.max_iter) +>>>>>>> 1a7e98b (Address comments for more generalized routine) self.opt_params = opt_params - self.error_values = error_values - # self.net_ = _set_params(opt_params) + self.obj = obj + self.net_ = net_ return - def plot_convergence(): - return + def plot_convergence(self, ax=None, show=True): + """Convergence plot. + + Parameters + ---------- + ax : instance of matplotlib figure | None + The matplotlib axis + show : bool + If True, show the figure + + Returns + ------- + fig : instance of plt.fig + The matplotlib figure handle. + """ + import matplotlib as mpl + import matplotlib.pyplot as plt + + if ax is None: + fig, ax = plt.subplots(constrained_layout=True) + + axis = ax if isinstance(ax, mpl.axes._axes.Axes) else ax + + x = list(range(1, self.max_iter + 1)) + y_max = max(self.obj) + 0.01 + + axis.plot(x, self.obj, color='black') + axis.set_ylim([-.01, y_max]) + axis.set_title('Convergence') + axis.set_xlabel('Number of calls') + axis.set_ylabel('Objective value') + axis.grid(visible=True) + + fig.show(show) + return axis.get_figure() def plot_param_search(): return -# gets called only once +def _get_params(net, constraints): + """Gets parameters. 
+ + Parameters + ---------- + net : Network + constraints : + {'drive_name': {'param_name': [min, max], + 'param_name': [min, max], + 'param_name': [min, max]}} + + Returns + ------- + params : dictionary + params['initial'] : list + params['constraints'] : list of tuples (min, max) + + params_to_optim : dictionary + {drive_name: ['param_name', 'param_name', 'param_name']} + might use this later to override net params in _set_params + (might have to go back to weights_ampa instead of ampa) + """ + params = dict() + + # Get params to optimize + param_names = dict() + for drive_name in constraints: + temp = list() + for param_name in constraints[drive_name]: + temp.append(param_name) + param_names.update({drive_name: temp}) + params.update({'names': param_names}) + + # Get initial params (bayesian & cobyla can use the same format) + initial_params = list() + cons = list() + + # get net drive names + drive_names = [key for key in net.external_drives.keys()] + + for drive_name in param_names: + # get relevant params + if drive_name in drive_names: + for param_name in param_names[drive_name]: + # instead check obj_fun + if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', + 'burst_std'): + initial_params.append(net.external_drives[drive_name] + ['dynamics'][param_name]) + cons.append(constraints[drive_name][param_name]) + elif param_name in ('ampa', 'nmda'): + conn_idxs = pick_connection(net, src_gids=drive_name) + for conn_idx in conn_idxs: + # L5_pyramidal, L2_basket, L5_basket, L2_pyramidal + target_receptor = net.connectivity[conn_idx]\ + ['receptor'] + if target_receptor == param_name: + initial_params.append(net.connectivity[conn_idx] + ['nc_dict']['A_weight']) + cons.append(constraints[drive_name][param_name]) + + params.update({'initial': initial_params}) + params.update({'constraints': cons}) + return params + + def _get_params_bayesian(net, constraints): - """Assembles constraints & initial parameters as required by gp_minimize. + """Assembles constraints in format required by gp_minimize. <<<<<<< HEAD ======= @@ -294,6 +434,7 @@ def _assemble_constraints_bayesian(constraints): Parameters ---------- +<<<<<<< HEAD constraints : dict The user-defined constraints. @@ -485,66 +626,138 @@ def _get_params_cobyla(net, constraints): """ <<<<<<< HEAD <<<<<<< HEAD +======= + net : Network + The network object. + constraints : list of lists + Constraints for each parameter to be optimized ([min, max]). - init_params = list() - cons = list() + Returns + ------- + params : dictionary + Contains parameter names, initial parameters, and constraints. 
- for drive_name in net.external_drives.keys(): - for cons_idx, cons_key in enumerate(constraints[drive_name]): - if cons_key == 'mu' or cons_key == 'sigma': - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][0] - x[idx]) - init_params.append( - net.external_drives[drive_name]['dynamics'][cons_key]) - elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][1] - x[idx]) - cons.append(lambda x, idx=cons_idx: - constraints[drive_name][cons_key][0] - x[idx]) - init_params.append( - net.connectivity[conn_idx]['nc_dict']['A_weight']) - return init_params, cons + """ + # get initial params + params = _get_params(net, constraints) + + # assemble constraints in solver-specific format + cons_bayesian = list() + for cons in params['constraints']: + cons_bayesian.append((cons[0], cons[1])) + params.update({'constraints': cons_bayesian}) + return params + + +def _get_params_cobyla(net, constraints): + """Assembles constraints in format required by fmin_cobyla. + + Parameters + ---------- + net : Network + The network object. + constraints : list of lists + Constraints for each parameter to be optimized ([min, max]). + + Returns + ------- + params : dictionary + Contains parameter names, initial parameters, and constraints. + """ + # get initial params + params = _get_params(net, constraints) +>>>>>>> 1a7e98b (Address comments for more generalized routine) + + # assemble constraints in solver-specific format + cons_cobyla = list() + for cons_idx, cons_val in enumerate(params['constraints']): + cons_cobyla.append(lambda x: + params['constraints'][cons_idx][1] - x[cons_idx]) + cons_cobyla.append(lambda x: + x[cons_idx] - params['constraints'][cons_idx][0]) + params.update({'constraints': cons_cobyla}) + return params -def _run_opt_bayesian(net, init_params, cons, metric, target_statistic, - window_len): - @use_named_args(dimensions=cons) - def _obj_func(**params): - return metric(net, target_statistic, window_len, **params) +def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): + """Uses gp_minimize optimizer. + + Parameters + ---------- + net : Network + params : dictionary + Contains parameter names, initial parameters, and constraints. + obj_fun : func + The objective function. + target_statistic : Dipole + The target statistic. + max_iter : int + Max number of calls. + + Returns + ------- + opt_params : list + Final parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. + """ + def _obj_func(predicted_params): + return obj_fun(net, + params['names'], + target_statistic, + predicted_params) - max_iter = 11 opt_results = gp_minimize(func=_obj_func, - dimensions=cons, + dimensions=params['constraints'], acq_func='EI', n_calls=max_iter, - x0=init_params, + x0=params['initial'], random_state=64) opt_params = opt_results.x - # get net_ - # ... 
- error_values = [np.min(opt_results.func_vals[:i]) - for i in range(1, max_iter + 1)] - return opt_params, error_values + obj = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] + # get optimized net + net_ = _set_params(net, params['names'], opt_params) + return opt_params, obj, net_ -def _run_opt_cobyla(net, init_params, cons, metric, target_statistic, - window_len): +def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): + """Uses fmin_cobyla optimizer. + + Parameters + ---------- + net : Network + params : dictionary + Contains parameter names, initial parameters, and constraints. + obj_fun : func + The objective function. + target_statistic : Dipole + The target statistic. + max_iter : int + Max number of calls. - def _obj_func(params_cobyla): - return metric(net=net, target_statistic=target_statistic, - window_len=window_len, params_cobyla=params_cobyla) + Returns + ------- + opt_params : list + Final parameters. + obj : list + Objective values. + net_ : Network + Optimized network object. + """ + def _obj_func(predicted_params): + return obj_fun(net, + params['names'], + target_statistic, + predicted_params) - max_iter = 11 opt_results = fmin_cobyla(_obj_func, - cons=cons, + cons=params['constraints'], rhobeg=0.1, rhoend=1e-4, - x0=init_params, + x0=params['initial'], maxfun=max_iter, catol=0.0) ======= @@ -758,6 +971,7 @@ def _obj_func(predicted_params): <<<<<<< HEAD >>>>>>> 2f308f8 (added pep8 formatting) opt_params = opt_results +<<<<<<< HEAD <<<<<<< HEAD # get net_ # ... @@ -796,10 +1010,15 @@ def _get_fixed_params(net): >>>>>>> df86a5d (Draft opt class and functions based on comments) ======= +======= + obj = list() + # get optimized net + net_ = _set_params(net, params['names'], opt_params) + return opt_params, obj, net_ +>>>>>>> 1a7e98b (Address comments for more generalized routine) -def _get_fixed_params(net): - """Gets fixed params. +<<<<<<< HEAD >>>>>>> 2f308f8 (added pep8 formatting) Returns ------- @@ -978,10 +1197,14 @@ def _set_params(net, param_names, predicted_params): def _set_params(net, fixed_params, predicted_params): +======= +def _set_params(net, param_names, predicted_params): +>>>>>>> 1a7e98b (Address comments for more generalized routine) """Sets the network parameters. Parameters ---------- +<<<<<<< HEAD net : the Network object fixed_params : unchanging network parameters predicted_params : the parameters predicted by the optimizer @@ -989,6 +1212,13 @@ def _set_params(net, fixed_params, predicted_params): >>>>>>> df86a5d (Draft opt class and functions based on comments) ======= +======= + net : Network + param_names : dictionary + Parameters to change. + predicted_params : list + The parameters selected by the optimizer. 
+>>>>>>> 1a7e98b (Address comments for more generalized routine) >>>>>>> 2f308f8 (added pep8 formatting) Returns @@ -999,57 +1229,45 @@ def _set_params(net, fixed_params, predicted_params): <<<<<<< HEAD net_new = net.copy() - # remove existing drives, re-set them with updated parameters - for drive_name in net.external_drives.keys(): - # clear drive - del net_new.external_drives[drive_name] - # clear connectivity - conn_idxs = pick_connection(net_new, src_gids=drive_name) - net_new.connectivity = [conn for conn_idx, conn - in enumerate(net_new.connectivity) - if conn_idx not in conn_idxs] - net_new.add_evoked_drive(name=drive_name, - mu=predicted_params[drive_name]['mu'], - sigma=predicted_params[drive_name]['sigma'], - numspikes=fixed_params[drive_name] - ['numspikes'], - location=fixed_params[drive_name]['location'], - n_drive_cells=fixed_params[drive_name] - ['n_drive_cells'], - cell_specific=fixed_params[drive_name] - ['cell_specific'], - weights_ampa=predicted_params[drive_name] - ['ampa_weights'], - weights_nmda=predicted_params[drive_name] - ['nmda_weights'], - space_constant=fixed_params[drive_name] - ['space_constant'], - synaptic_delays=fixed_params[drive_name] - ['synaptic_delays'], - probability=fixed_params[drive_name] - ['probability'], - event_seed=fixed_params[drive_name] - ['event_seed'], - conn_seed=fixed_params[drive_name] - ['conn_seed']) - return net_new + # get net drive names + count = 0 + drive_names = [key for key in net_new.external_drives.keys()] + for drive_name in param_names: -def _get_opt_net(opt_params, fixed_params): - """Returns the optimized network net_.""" - return + # set relevant params + if drive_name in drive_names: + for param_name in param_names[drive_name]: + if param_name in ('mu', 'sigma'): + net_new.external_drives[drive_name]['dynamics']\ + [param_name] = predicted_params[count] + count += 1 + + elif param_name in ('ampa', 'nmda'): + conn_idxs = pick_connection(net_new, src_gids=drive_name) + for conn_idx in conn_idxs: + target_receptor = net_new.connectivity[conn_idx]\ + ['receptor'] + if target_receptor == param_name: + net_new.connectivity[conn_idx]['nc_dict']\ + ['A_weight'] = predicted_params[count] + count += 1 + return net_new # These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, target_statistic, window_len, **params): - # params_cobyla = None +def _rmse_evoked(net, param_names, target_statistic, predicted_params): """The objective function for evoked responses. Parameters ----------- - net : the Network object - target_dpl : the recorded dipole - params : the constraints + net : Network + param_names : dictionary + Parameters to change. + target_statistic : Dipole + The recorded dipole. + predicted_params : list + Parameters selected by the optimizer. 
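Editor's sketch, not part of the patch, of the access pattern `_set_params` relies on above, assuming a network with the default 'evprox1' drive attached (weight and timing values are hypothetical):

from hnn_core import jones_2009_model
from hnn_core.network import pick_connection

net = jones_2009_model(add_drives_from_params=True)
conn_idxs = pick_connection(net, src_gids='evprox1')
for conn_idx in conn_idxs:
    if net.connectivity[conn_idx]['receptor'] == 'ampa':
        # overwrite the AMPA weight for every target this drive contacts
        net.connectivity[conn_idx]['nc_dict']['A_weight'] = 0.5
net.external_drives['evprox1']['dynamics']['mu'] = 20.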
Returns ------- @@ -1111,6 +1329,7 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params): >>>>>>> 2f308f8 (added pep8 formatting) """ +<<<<<<< HEAD <<<<<<< HEAD fixed_params = _get_fixed_params(net) # get predicted params @@ -1148,6 +1367,20 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params): <<<<<<< HEAD <<<<<<< HEAD dpl.scale(scaling_factor) +======= + # get network with predicted params + new_net = _set_params(net, param_names, predicted_params) + # simulate dipole + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + # smooth & scale + # if dpl.smooth(window_len): + # dpl_smooth = dpl.copy().smooth(window_len) + dpl.smooth(30) + # scaling_factor = get from target_statistic + # dpl.scale(scaling_factor) + +>>>>>>> 1a7e98b (Address comments for more generalized routine) # calculate error rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() / len(dpl.times)) / (max(target_statistic.data['agg']) @@ -1155,7 +1388,41 @@ def _rmse_evoked(net, param_names, target_statistic, predicted_params): return rmse -def _rmse_rhythmic(): +def _rmse_rhythmic(net, param_names, target_statistic, predicted_params): + """The objective function for evoked responses. + + Parameters + ----------- + net : Network + param_names : dictionary + Parameters to change. + target_statistic : Dipole + The recorded dipole. + predicted_params : list + Parameters selected by the optimizer. + + Returns + ------- + rmse : norm + """ + + from scipy import signal + + # expose these + fmin = 0.0 + fmax = 200.0 + + new_net = _set_params(net, param_names, predicted_params) + dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] + + f_target, psd_target = signal.periodogram(target_statistic.data['agg']) + f_simulated, psd_simulated = signal.periodogram(dpl.data['agg']) + + rmse = np.linalg.norm(psd_target - psd_simulated) + return rmse + + +def _rmse_poisson(): return ======= ======= From 83ac65f0cd77822d0e45b43d5de738c2e3c01842 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Thu, 22 Jun 2023 09:39:01 -0400 Subject: [PATCH 16/63] Update hnn_core/opt_toy_example/general.py Co-authored-by: Nicholas Tolley <55253912+ntolley@users.noreply.github.com> --- hnn_core/opt_toy_example/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index e47a63080..e5ac3eb05 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -192,7 +192,7 @@ def plot_convergence(self, ax=None, show=True): If True, show the figure Returns - ------- + ------- fig : instance of plt.fig The matplotlib figure handle. """ From 3f4d44b71ccd0ce91203a39ee8054557ec91ce0b Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 27 Jul 2023 06:53:14 -0400 Subject: [PATCH 17/63] Delete file --- .../opt_toy_example/remove_aperiodic_funcs.py | 139 ------------------ 1 file changed, 139 deletions(-) delete mode 100644 hnn_core/opt_toy_example/remove_aperiodic_funcs.py diff --git a/hnn_core/opt_toy_example/remove_aperiodic_funcs.py b/hnn_core/opt_toy_example/remove_aperiodic_funcs.py deleted file mode 100644 index b38c10b9d..000000000 --- a/hnn_core/opt_toy_example/remove_aperiodic_funcs.py +++ /dev/null @@ -1,139 +0,0 @@ -def _compute_welch_psd(target_statistic, sfreq): - """ Computes the PSD usig scipy.signal.welch. - - Parameters - ---------- - target_statistic : ndarray - The target dipole. 
psd_array_multitaper takes input signal in the - time-domain. - sfreq : - ... - - Returns - ------- - freqs : ndarray - The frequency points in Hz. - psd : ndarray - Power spectral density. - """ - - from scipy.signal import welch - - freqs, psd = welch(target_statistic, fs=sfreq) - - return freqs, psd - - -def _compute_multitaper_psd(target_statistic, sfreq): - """ Computes the PSD usig mne.time_frequency.psd_array_multitaper. - - Parameters - ---------- - target_statistic : ndarray - The target dipole. psd_array_multitaper takes input signal in the - time-domain. - sfreq: - ... - - Returns - ------- - freqs : array - The frequency points in Hz. - psd : ndarray - Power spectral density. - """ - - from mne.time_frequency import psd_array_multitaper - - psd, freqs = psd_array_multitaper(target_statistic, sfreq=sfreq) - - return freqs, psd - - -def _remove_aperiodic_foof(target_statistic, compute_psd, sfreq): - """Gets periodic and aperiodic parameters. Calculates the aperiodic - PSD from the Lorentzian function. Subtracts the aperiodic PSD from the - neural power spectra (PSD of the target statistic). - - Relies on the Fitting Oscillations and one-over-f (FOOOF) method - (Donoghue et al 2020) to model the PSD of EEG/MEG signals as a combination - of a periodic component (1/f) and at least one periodic component - (oscillations). - - Parameters - ---------- - target_statistic : ndarray - The target dipole. - compute_psd : - ... - sfreq : - ... - - Returns - ------- - clean_psd : ndarray - Neural power spectra minus PSD of the aperiodic approximation. - - """ - - from fooof import FOOOF - import numpy as np - - # get PSD of target statistic - freqs, psd = compute_psd(target_statistic, sfreq) - - # fit FOOOF model - fm = FOOOF(aperiodic_mode='knee') - fm.fit(freqs, psd) - - # get aperiodic parameters - offset, knee, exponent = fm.aperiodic_params_[0],\ - fm.aperiodic_params_[1],\ - fm.aperiodic_params_[2] - - # compute aperiodic component - aperiodic_component = offset - np.log10(knee + (psd**exponent)) - - clean_psd = psd - aperiodic_component - - return clean_psd - - -def _remove_aperiodic_irasa(target_statistic, compute_psd, sfreq): - """Gets periodic and aperiodic PSD components. Subtracts the aperiodic - PSD from the neural power spectra (PSD of the target statistic). - - Relies on the Irregular-Resampling Auto-Spectral Analysis (IRASA) method - (Wen & Liu 2016) to separate the aperiodic (1/f) and periodic components - (oscillations) in the PSD of EEG signals. - - - Parameters - ---------- - target_statistic : ndarray - The target dipole. - compute_psd : - ... - sfreq : - ... - - Returns - ------- - clean_psd : ndarray - Neural power spectra minus PSD of the aperiodic approximation. 
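Editor's note on the FOOOF helper deleted above: the 'knee' aperiodic fit is a Lorentzian over frequency, L(f) = offset - log10(knee + f**exponent), and the subtraction should be done in matching log-power units; the deleted code raised the PSD values, rather than the frequencies, to the exponent. A corrected sketch of the intended computation:

import numpy as np

def aperiodic_lorentzian(freqs, offset, knee, exponent):
    # FOOOF knee-mode aperiodic component, in log10(power)
    return offset - np.log10(knee + freqs ** exponent)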
- - """ - - import yasa - - # get PSD of target statistic - freqs, psd = compute_psd(target_statistic, sfreq) - - # get aperiodic component - f, aperiodic_component, periodic_component = yasa.irasa(data=target_statistic, - sf=sfreq, - return_fit=False) - - clean_psd = psd - aperiodic_component - - return clean_psd From 28915fab43a6b4d3e757032bfe12cd730c839f6b Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 3 Aug 2023 01:45:56 -0400 Subject: [PATCH 18/63] Add tests and address comments --- hnn_core/opt_toy_example/general.py | 58 ++++++++++---- hnn_core/opt_toy_example/metrics.py | 35 +++++---- hnn_core/opt_toy_example/optimize_evoked.py | 17 ++-- .../opt_toy_example/test_optimize_evoked.py | 77 +++++++++++++++++++ 4 files changed, 150 insertions(+), 37 deletions(-) create mode 100644 hnn_core/opt_toy_example/test_optimize_evoked.py diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index e5ac3eb05..5da79c3e4 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -1,3 +1,10 @@ +"""Parameter optimization.""" + +# Authors: Carolina Fernandez +# Nick Tolley +# Ryan Thorpe +# Mainak Jas + import numpy as np <<<<<<< HEAD <<<<<<< HEAD @@ -16,8 +23,12 @@ from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson ======= +<<<<<<< HEAD from metrics import _rmse_evoked >>>>>>> 50186cb (Clean up optimize evoked and example) +======= +from metrics import _rmse_evoked # change path*** +>>>>>>> 46f1268 (Add tests and address comments) from skopt import gp_minimize from scipy.optimize import fmin_cobyla @@ -43,10 +54,14 @@ class Optimizer: <<<<<<< HEAD def __init__(self, net, constraints, set_params, solver, obj_fun, +<<<<<<< HEAD scale_factor, smooth_window_len, tstop): ======= def __init__(self, net, constraints, solver, obj_fun): >>>>>>> 1a7e98b (Address comments for more generalized routine) +======= + tstop, scale_factor=1., smooth_window_len=None): +>>>>>>> 46f1268 (Add tests and address comments) self.net = net self.constraints = constraints self._set_params = set_params @@ -106,6 +121,7 @@ def __repr__(self): <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD def fit(self, target_statistic): >>>>>>> 672ce00 (Address comments for more generalized routine) @@ -305,12 +321,15 @@ def _get_params_bayesian(net, constraints): self.constraints) ======= def fit(self, target_statistic): +======= + def fit(self, target): +>>>>>>> 46f1268 (Add tests and address comments) """ Runs optimization routine. Parameters ---------- - target_statistic : ndarray + target : ndarray The recorded dipole. Returns @@ -355,7 +374,7 @@ def fit(self, target_statistic): self.tstop, >>>>>>> 50186cb (Clean up optimize evoked and example) self.max_iter, - target_statistic) + target) >>>>>>> e101c24 (Draft opt class and functions based on comments) self.opt_params = opt_params @@ -376,7 +395,7 @@ def plot_convergence(self, ax=None, show=True): Returns ------- - instance of plt.fig + fig : instance of plt.fig The matplotlib figure handle. """ @@ -421,9 +440,8 @@ def _get_initial_params(constraints): initial_params = dict() for cons_key in constraints: - initial_params.update({cons_key: - (constraints[cons_key][0] + - constraints[cons_key][1])/2}) + initial_params.update({cons_key: (constraints[cons_key][0] + + constraints[cons_key][1])/2}) return initial_params @@ -461,14 +479,16 @@ def _assemble_constraints_cobyla(constraints): Returns ------- cons_bayesian : dict - ... + Set of functions. 
""" # assemble constraints in solver-specific format cons_cobyla = list() for idx, cons_key in enumerate(constraints): - cons_cobyla.append(lambda x: float(constraints[cons_key][1]) - x[idx]) - cons_cobyla.append(lambda x: x[idx] - float(constraints[cons_key][0])) + cons_cobyla.append(lambda x, idx=idx: + float(constraints[cons_key][1]) - x[idx]) + cons_cobyla.append(lambda x, idx=idx: + x[idx] - float(constraints[cons_key][0])) return cons_cobyla @@ -794,7 +814,7 @@ def _obj_func(predicted_params): def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun, scale_factor, smooth_window_len, tstop, max_iter, - target_statistic): + target): """ Runs optimization routine with gp_minimize optimizer. @@ -818,7 +838,7 @@ def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun, The simulated dipole's duration. max_iter : int Number of calls the optimizer makes. - target_statistic : ndarray + target : ndarray The recorded dipole. Returns @@ -869,8 +889,12 @@ def _obj_func(predicted_params): scale_factor, smooth_window_len, tstop, +<<<<<<< HEAD target_statistic) >>>>>>> 50186cb (Clean up optimize evoked and example) +======= + target) +>>>>>>> 46f1268 (Add tests and address comments) opt_results = gp_minimize(func=_obj_func, dimensions=constraints, @@ -886,14 +910,15 @@ def _obj_func(predicted_params): # get optimized net param_dict = _update_params(initial_params, opt_params) - net_ = set_params(net.copy(), param_dict) + net_ = net.copy() + set_params(net_, param_dict) return opt_params, obj, net_ def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun, scale_factor, smooth_window_len, tstop, max_iter, - target_statistic): + target): """ Runs optimization routine with fmin_cobyla optimizer. @@ -917,7 +942,7 @@ def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun, The simulated dipole's duration. max_iter : int Number of calls the optimizer makes. - target_statistic : ndarray, None + target : ndarray, None The recorded dipole. The default is None. Returns @@ -942,7 +967,7 @@ def _obj_func(predicted_params): scale_factor, smooth_window_len, tstop, - target_statistic) + target) opt_results = fmin_cobyla(_obj_func, <<<<<<< HEAD @@ -993,7 +1018,8 @@ def _obj_func(predicted_params): >>>>>>> 50186cb (Clean up optimize evoked and example) # get optimized net param_dict = _update_params(initial_params, opt_params) - net_ = set_params(net.copy(), param_dict) + net_ = net.copy() + set_params(net_, param_dict) return opt_params, obj, net_ <<<<<<< HEAD diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/opt_toy_example/metrics.py index 44a69e102..e895b4382 100644 --- a/hnn_core/opt_toy_example/metrics.py +++ b/hnn_core/opt_toy_example/metrics.py @@ -1,3 +1,10 @@ +"""Metrics for parameter optimization.""" + +# Authors: Carolina Fernandez +# Nick Tolley +# Ryan Thorpe +# Mainak Jas + import numpy as np from hnn_core import simulate_dipole, MPIBackend @@ -7,7 +14,7 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, update_params, obj_values, scale_factor, smooth_window_len, - tstop, target_statistic): + tstop, target): """ The objective function for evoked responses. @@ -29,7 +36,7 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, The smooth window length. tstop : float The simulated dipole's duration. - target_statistic : ndarray + target : ndarray The recorded dipole. 
Returns @@ -41,24 +48,26 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, param_dict = update_params(initial_params, predicted_params) # simulate dpl with predicted params - new_net = set_params(net.copy(), param_dict) + new_net = net.copy() + set_params(new_net, param_dict) with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): dpl = simulate_dipole(new_net, tstop=tstop, n_trials=1)[0] - # downsample if necessary - if (len(dpl.data['agg']) < len(target_statistic)): - target_statistic = resample(target_statistic, len(dpl.data['agg'])) - elif (len(dpl.data['agg']) > len(target_statistic)): - dpl.data['agg'] = resample(dpl.data['agg'], len(target_statistic)) - # smooth & scale dpl.scale(scale_factor) - dpl.smooth(smooth_window_len) + if smooth_window_len is not None: + dpl.smooth(smooth_window_len) + + # downsample if necessary + if (len(dpl.data['agg']) < len(target)): + target = resample(target, len(dpl.data['agg'])) + elif (len(dpl.data['agg']) > len(target)): + dpl.data['agg'] = resample(dpl.data['agg'], len(target)) # calculate rmse - obj = np.sqrt(((dpl.data['agg'] - target_statistic)**2).sum() / - len(dpl.times)) / (max(target_statistic) - - min(target_statistic)) + obj = np.sqrt(((dpl.data['agg'] - target)**2).sum() / + len(dpl.times)) / (max(target) - + min(target)) obj_values.append(obj) diff --git a/hnn_core/opt_toy_example/optimize_evoked.py b/hnn_core/opt_toy_example/optimize_evoked.py index 45e662f9d..a20b9745c 100644 --- a/hnn_core/opt_toy_example/optimize_evoked.py +++ b/hnn_core/opt_toy_example/optimize_evoked.py @@ -7,7 +7,9 @@ of the model simulation to match an experimental dipole waveform. """ -# Authors: +# Authors: Blake Caldwell +# Mainak Jas +# Carolina Fernandez import os.path as op @@ -98,8 +100,6 @@ def set_params(net, params_dict): weights_ampa=weights_ampa_p2, synaptic_delays=synaptic_delays_p) - return net - ############################################################################### # Then, we define the constrainst. # @@ -131,16 +131,17 @@ def set_params(net, params_dict): ############################################################################### # Now we define and fit the optimizer. -from general import Optimizer +from general import Optimizer # change path*** tstop = exp_dpl.times[-1] scale_factor = 3000 -smooth_window_len = 2 +smooth_window_len = 30 net = jones_2009_model() -optim = Optimizer(net, constraints, set_params, solver='cobyla', - obj_fun='evoked', scale_factor=scale_factor, - smooth_window_len=smooth_window_len, tstop=tstop) +optim = Optimizer(net, constraints=constraints, set_params=set_params, + solver='bayesian', obj_fun='evoked', tstop=tstop, + scale_factor=scale_factor, + smooth_window_len=smooth_window_len) optim.fit(exp_dpl.data['agg']) ############################################################################### diff --git a/hnn_core/opt_toy_example/test_optimize_evoked.py b/hnn_core/opt_toy_example/test_optimize_evoked.py new file mode 100644 index 000000000..d5bc33d38 --- /dev/null +++ b/hnn_core/opt_toy_example/test_optimize_evoked.py @@ -0,0 +1,77 @@ +# Authors: Mainak Jas +# Carolina Fernandez + +from hnn_core import jones_2009_model, simulate_dipole +from general import Optimizer # change path*** + + +def _optimize_evoked(solver): + """Test running the full routine in a reduced network.""" + + tstop = 5. + n_trials = 1 + + # simulate a dipole to establish ground-truth drive parameters + net_orig = jones_2009_model() + mu_orig = 6. 
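+    # `mu_orig` below is the ground truth: the test first simulates a dipole
+    # with this known drive time, then checks that the optimizer can recover
+    # it when started from a different value.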
+ weights_ampa = {'L2_basket': 0.5, + 'L2_pyramidal': 0.5, + 'L5_basket': 0.5, + 'L5_pyramidal': 0.5} + synaptic_delays = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, + 'L5_basket': 1., 'L5_pyramidal': 1.} + net_orig.add_evoked_drive('evprox', + mu=mu_orig, + sigma=1, + numspikes=1, + location='proximal', + weights_ampa=weights_ampa, + synaptic_delays=synaptic_delays) + dpl_orig = simulate_dipole(net_orig, tstop=tstop, n_trials=n_trials)[0] + + # define set_params function and constraints + net_offset = jones_2009_model() + + def set_params(net_offset, param_dict): + weights_ampa = {'L2_basket': 0.5, + 'L2_pyramidal': 0.5, + 'L5_basket': 0.5, + 'L5_pyramidal': 0.5} + synaptic_delays = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, + 'L5_basket': 1., 'L5_pyramidal': 1.} + net_offset.add_evoked_drive('evprox', + mu=param_dict['mu_offset'], + sigma=1, + numspikes=1, + location='proximal', + weights_ampa=weights_ampa, + synaptic_delays=synaptic_delays) + + # define constraints + mu_offset = 4. # initial time-shifted drive + mu_range = (2, 8) + constraints = dict() + constraints.update({'mu_offset': mu_range}) + + optim = Optimizer(net_offset, constraints=constraints, + set_params=set_params, solver=solver, + obj_fun='evoked', tstop=tstop) + optim.fit(dpl_orig.data['agg']) + + opt_param = optim.opt_params + # the optimized parameter is in the range + assert opt_param[0] in range(mu_range[0], mu_range[1]), "Optimized parameter is not in user-defined range" + + obj = optim.obj + # the number of returned rmse values should be the same as max_iter + assert len(obj) <= 200, "Number of rmse values should be the same as max_iter" + # the returned rmse values should be positive + assert all(vals > 0 for vals in obj), "rmse values should be positive" + + +def test_bayesian_evoked(): + _optimize_evoked('bayesian') + + +def test_cobyla_evoked(): + _optimize_evoked('cobyla') From 64977afabceca62d95690386a19082910ba32c84 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Mon, 7 Aug 2023 14:13:44 -0400 Subject: [PATCH 19/63] Address comments and fix test script --- hnn_core/opt_toy_example/general.py | 18 ++++++++++++++++-- hnn_core/opt_toy_example/optimize_evoked.py | 5 +++-- .../opt_toy_example/test_optimize_evoked.py | 18 ++++++++++-------- 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 5da79c3e4..86425e947 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -61,7 +61,14 @@ def __init__(self, net, constraints, solver, obj_fun): >>>>>>> 1a7e98b (Address comments for more generalized routine) ======= tstop, scale_factor=1., smooth_window_len=None): +<<<<<<< HEAD >>>>>>> 46f1268 (Add tests and address comments) +======= + if net.external_drives: + raise ValueError("The current Network instance has external " + + "drives, provide a Network object with no " + + "drives.") +>>>>>>> 51112fc (Address comments and fix test script) self.net = net self.constraints = constraints self._set_params = set_params @@ -73,9 +80,16 @@ def __init__(self, net, constraints, solver, obj_fun): self._assemble_constraints = _assemble_constraints_cobyla self._run_opt = _run_opt_cobyla <<<<<<< HEAD +<<<<<<< HEAD +======= + else: + raise ValueError("solver must be 'bayesian' or 'cobyla'") +>>>>>>> 51112fc (Address comments and fix test script) # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked + else: + raise ValueError("obj_fun must be 'evoked'") self.scale_factor = 
scale_factor self.smooth_window_len = smooth_window_len self.tstop = tstop @@ -440,8 +454,8 @@ def _get_initial_params(constraints): initial_params = dict() for cons_key in constraints: - initial_params.update({cons_key: (constraints[cons_key][0] + - constraints[cons_key][1])/2}) + initial_params.update({cons_key: ((constraints[cons_key][0] + + constraints[cons_key][1]))/2}) return initial_params diff --git a/hnn_core/opt_toy_example/optimize_evoked.py b/hnn_core/opt_toy_example/optimize_evoked.py index a20b9745c..7e4a90bf1 100644 --- a/hnn_core/opt_toy_example/optimize_evoked.py +++ b/hnn_core/opt_toy_example/optimize_evoked.py @@ -7,9 +7,10 @@ of the model simulation to match an experimental dipole waveform. """ -# Authors: Blake Caldwell +# Authors: Carolina Fernandez +# Nick Tolley +# Ryan Thorpe # Mainak Jas -# Carolina Fernandez import os.path as op diff --git a/hnn_core/opt_toy_example/test_optimize_evoked.py b/hnn_core/opt_toy_example/test_optimize_evoked.py index d5bc33d38..2b3128ea5 100644 --- a/hnn_core/opt_toy_example/test_optimize_evoked.py +++ b/hnn_core/opt_toy_example/test_optimize_evoked.py @@ -1,5 +1,4 @@ -# Authors: Mainak Jas -# Carolina Fernandez +# Authors: Carolina Fernandez from hnn_core import jones_2009_model, simulate_dipole from general import Optimizer # change path*** @@ -8,12 +7,14 @@ def _optimize_evoked(solver): """Test running the full routine in a reduced network.""" - tstop = 5. + tstop = 10. n_trials = 1 # simulate a dipole to establish ground-truth drive parameters net_orig = jones_2009_model() - mu_orig = 6. + net_orig._N_pyr_x = 3 + net_orig._N_pyr_y = 3 + mu_orig = 2. weights_ampa = {'L2_basket': 0.5, 'L2_pyramidal': 0.5, 'L5_basket': 0.5, @@ -31,6 +32,8 @@ def _optimize_evoked(solver): # define set_params function and constraints net_offset = jones_2009_model() + net_offset._N_pyr_x = 3 + net_offset._N_pyr_y = 3 def set_params(net_offset, param_dict): weights_ampa = {'L2_basket': 0.5, @@ -48,8 +51,7 @@ def set_params(net_offset, param_dict): synaptic_delays=synaptic_delays) # define constraints - mu_offset = 4. 
# initial time-shifted drive - mu_range = (2, 8) + mu_range = (1, 6) constraints = dict() constraints.update({'mu_offset': mu_range}) @@ -58,9 +60,9 @@ def set_params(net_offset, param_dict): obj_fun='evoked', tstop=tstop) optim.fit(dpl_orig.data['agg']) - opt_param = optim.opt_params + opt_param = optim.opt_params[0] # the optimized parameter is in the range - assert opt_param[0] in range(mu_range[0], mu_range[1]), "Optimized parameter is not in user-defined range" + assert mu_range[0] <= opt_param <= mu_range[1], "Optimized parameter is not in user-defined range" obj = optim.obj # the number of returned rmse values should be the same as max_iter From af81c718caa676a3239bf1833ee0f73d975d857b Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Tue, 8 Aug 2023 15:42:17 -0400 Subject: [PATCH 20/63] fix test script --- hnn_core/opt_toy_example/general.py | 1178 +-------------------------- 1 file changed, 1 insertion(+), 1177 deletions(-) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/opt_toy_example/general.py index 86425e947..5503a7aa6 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/opt_toy_example/general.py @@ -6,69 +6,20 @@ # Mainak Jas import numpy as np -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= - -<<<<<<< HEAD ->>>>>>> 672ce00 (Address comments for more generalized routine) -from hnn_core import simulate_dipole -======= ->>>>>>> 18ac228 (added methods to remove 1/f and compute psd) -from hnn_core.network import pick_connection - -from metrics import _rmse_evoked, _rmse_rhythmic, _rmse_poisson -======= - -<<<<<<< HEAD -from metrics import _rmse_evoked ->>>>>>> 50186cb (Clean up optimize evoked and example) -======= -from metrics import _rmse_evoked # change path*** ->>>>>>> 46f1268 (Add tests and address comments) - -from skopt import gp_minimize -from scipy.optimize import fmin_cobyla - -======= -import matplotlib.pyplot as plt -from collections import namedtuple -from hnn_core import read_params, jones_2009_model, simulate_dipole -======= -======= - ->>>>>>> 1a7e98b (Address comments for more generalized routine) -from hnn_core import simulate_dipole ->>>>>>> 2f308f8 (added pep8 formatting) -from hnn_core.network import pick_connection +from metrics import _rmse_evoked # change path*** from skopt import gp_minimize from scipy.optimize import fmin_cobyla ->>>>>>> df86a5d (Draft opt class and functions based on comments) class Optimizer: -<<<<<<< HEAD def __init__(self, net, constraints, set_params, solver, obj_fun, -<<<<<<< HEAD - scale_factor, smooth_window_len, tstop): -======= - def __init__(self, net, constraints, solver, obj_fun): ->>>>>>> 1a7e98b (Address comments for more generalized routine) -======= tstop, scale_factor=1., smooth_window_len=None): -<<<<<<< HEAD ->>>>>>> 46f1268 (Add tests and address comments) -======= if net.external_drives: raise ValueError("The current Network instance has external " + "drives, provide a Network object with no " + "drives.") ->>>>>>> 51112fc (Address comments and fix test script) self.net = net self.constraints = constraints self._set_params = set_params @@ -79,12 +30,8 @@ def __init__(self, net, constraints, solver, obj_fun): elif solver == 'cobyla': self._assemble_constraints = _assemble_constraints_cobyla self._run_opt = _run_opt_cobyla -<<<<<<< HEAD -<<<<<<< HEAD -======= else: raise ValueError("solver must be 'bayesian' or 'cobyla'") ->>>>>>> 51112fc (Address comments and fix test script) # Response to be optimized if obj_fun == 'evoked': self.obj_fun = _rmse_evoked 
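The guard clauses consolidated above are easiest to see in action outside the diff. A minimal sketch of their behavior, assuming hnn_core is installed and that `Optimizer` is imported the way the test script does at this point in the series (the module path moves in a later patch):

from hnn_core import jones_2009_model
from general import Optimizer  # import path as used by test_optimize_evoked.py here

# A network that already has drives attached is rejected up front.
net_with_drives = jones_2009_model(add_drives_from_params=True)
try:
    Optimizer(net_with_drives, constraints=dict(), set_params=None,
              solver='bayesian', obj_fun='evoked', tstop=10.)
except ValueError as err:
    print(err)  # asks for a Network object with no drives

# An unknown solver name is rejected the same way.
try:
    Optimizer(jones_2009_model(), constraints=dict(), set_params=None,
              solver='nelder-mead', obj_fun='evoked', tstop=10.)
except ValueError as err:
    print(err)  # solver must be 'bayesian' or 'cobyla'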
@@ -96,248 +43,13 @@ def __init__(self, net, constraints, solver, obj_fun): self.net_ = None self.obj = list() self.opt_params = None -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - - def fit(self, target_statistic, window_len=None): -======= - - def fit(self, target_statistic, window_len = None): ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - - def fit(self, target_statistic, window_len=None): ->>>>>>> 2f308f8 (added pep8 formatting) -======= - self.max_iter = 200 -======= - self.max_iter = 150 ->>>>>>> a4f67f6 (add optimize rhythmic function) -======= self.max_iter = 200 ->>>>>>> f3490e1 (added pep8 formatting) -======= - self.max_iter = 150 ->>>>>>> 9131299 (Address comments for more generalized routine) -======= - self.max_iter = 200 ->>>>>>> 50186cb (Clean up optimize evoked and example) def __repr__(self): class_name = self.__class__.__name__ return class_name -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - def fit(self, target_statistic): ->>>>>>> 672ce00 (Address comments for more generalized routine) -======= - def fit(self, target_statistic, sfreq): ->>>>>>> 18ac228 (added methods to remove 1/f and compute psd) -======= - def fit(self, target_statistic): ->>>>>>> a4f67f6 (add optimize rhythmic function) -======= - def fit(self, target_statistic=None): ->>>>>>> e101c24 (Draft opt class and functions based on comments) -======= - if obj_fun == 'evoked': - self.obj_fun = _rmse_evoked - elif obj_fun == 'rhythmic': - self.obj_fun = _rmse_rhythmic - elif obj_fun == 'poisson': - self.obj_fun = _rmse_poisson - self.net_ = None - self.obj = list() - self.opt_params = None - self.max_iter = 200 - - def __repr__(self): - class_name = self.__class__.__name__ - return '<%s | %s>' % (class_name) - - def fit(self, target_statistic): ->>>>>>> 1a7e98b (Address comments for more generalized routine) - """ ... - - Parameters - ---------- -<<<<<<< HEAD - target_statistic : ndarray - Recorded dipole (must have the same amount of data points as - the initial, simulated dipole). - scaling_factor : float - ... -======= - target_statistic : - dpl - window_len : float - for smoothing dpl ->>>>>>> 1a7e98b (Address comments for more generalized routine) - """ -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 2f308f8 (added pep8 formatting) - -<<<<<<< HEAD -<<<<<<< HEAD - init_params, cons = self._get_params(self.net, self.constraints) - opt_params, error_values = self._run_opt(self.net, - init_params, - cons, - self.metric, - target_statistic, - window_len) -<<<<<<< HEAD -======= - params = self._get_params(self.net, self.constraints) - opt_params, obj, net_ = self._run_opt(self.net, - params, - self.obj_fun, - target_statistic, - self.max_iter) ->>>>>>> 1a7e98b (Address comments for more generalized routine) - self.opt_params = opt_params - self.obj = obj - self.net_ = net_ - return - - def plot_convergence(self, ax=None, show=True): - """Convergence plot. - - Parameters - ---------- - ax : instance of matplotlib figure | None - The matplotlib axis - show : bool - If True, show the figure - - Returns - ------- - fig : instance of plt.fig - The matplotlib figure handle. 
- """ - import matplotlib as mpl - import matplotlib.pyplot as plt - - if ax is None: - fig, ax = plt.subplots(constrained_layout=True) - - axis = ax if isinstance(ax, mpl.axes._axes.Axes) else ax - - x = list(range(1, self.max_iter + 1)) - y_max = max(self.obj) + 0.01 - - axis.plot(x, self.obj, color='black') - axis.set_ylim([-.01, y_max]) - axis.set_title('Convergence') - axis.set_xlabel('Number of calls') - axis.set_ylabel('Objective value') - axis.grid(visible=True) - - fig.show(show) - return axis.get_figure() - - def plot_param_search(): - return - - -def _get_params(net, constraints): - """Gets parameters. - - Parameters - ---------- - net : Network - constraints : - {'drive_name': {'param_name': [min, max], - 'param_name': [min, max], - 'param_name': [min, max]}} - - Returns - ------- - params : dictionary - params['initial'] : list - params['constraints'] : list of tuples (min, max) - - params_to_optim : dictionary - {drive_name: ['param_name', 'param_name', 'param_name']} - might use this later to override net params in _set_params - (might have to go back to weights_ampa instead of ampa) - """ - params = dict() - - # Get params to optimize - param_names = dict() - for drive_name in constraints: - temp = list() - for param_name in constraints[drive_name]: - temp.append(param_name) - param_names.update({drive_name: temp}) - params.update({'names': param_names}) - - # Get initial params (bayesian & cobyla can use the same format) - initial_params = list() - cons = list() - - # get net drive names - drive_names = [key for key in net.external_drives.keys()] - - for drive_name in param_names: - # get relevant params - if drive_name in drive_names: - for param_name in param_names[drive_name]: - # instead check obj_fun - if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', - 'burst_std'): - initial_params.append(net.external_drives[drive_name] - ['dynamics'][param_name]) - cons.append(constraints[drive_name][param_name]) - elif param_name in ('ampa', 'nmda'): - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - # L5_pyramidal, L2_basket, L5_basket, L2_pyramidal - target_receptor = net.connectivity[conn_idx]\ - ['receptor'] - if target_receptor == param_name: - initial_params.append(net.connectivity[conn_idx] - ['nc_dict']['A_weight']) - cons.append(constraints[drive_name][param_name]) - - params.update({'initial': initial_params}) - params.update({'constraints': cons}) - return params - - -def _get_params_bayesian(net, constraints): - """Assembles constraints in format required by gp_minimize. - -<<<<<<< HEAD -======= - - init_params, cons = self._get_params(self.net, self.constraints) - opt_params, error_values = self._run_opt(self.net, init_params, cons, self.metric, target_statistic, window_len) - -======= ->>>>>>> 2f308f8 (added pep8 formatting) -======= - params = self._get_params(self.net, self.constraints) -======= - params = self.__assemble_constraints(self._set_params, - self.constraints) -======= - def fit(self, target_statistic): -======= def fit(self, target): ->>>>>>> 46f1268 (Add tests and address comments) """ Runs optimization routine. 
@@ -354,43 +66,18 @@ def fit(self, target): constraints = self._assemble_constraints(self.constraints) initial_params = _get_initial_params(self.constraints) ->>>>>>> 50186cb (Clean up optimize evoked and example) ->>>>>>> e101c24 (Draft opt class and functions based on comments) opt_params, obj, net_ = self._run_opt(self.net, constraints, initial_params, self._set_params, self.obj_fun, -<<<<<<< HEAD -<<<<<<< HEAD - target_statistic, -<<<<<<< HEAD - self.max_iter) ->>>>>>> 672ce00 (Address comments for more generalized routine) -======= - self.max_iter, -<<<<<<< HEAD - self.obj_fun_type, - self._remove_aperiodic, - self._compute_psd, - sfreq) ->>>>>>> 18ac228 (added methods to remove 1/f and compute psd) -======= - self.f_bands, - self.weights) ->>>>>>> a4f67f6 (add optimize rhythmic function) -======= - self.scaling, -======= self.scale_factor, self.smooth_window_len, self.tstop, ->>>>>>> 50186cb (Clean up optimize evoked and example) self.max_iter, target) ->>>>>>> e101c24 (Draft opt class and functions based on comments) self.opt_params = opt_params self.obj = obj self.net_ = net_ @@ -466,7 +153,6 @@ def _assemble_constraints_bayesian(constraints): Parameters ---------- -<<<<<<< HEAD constraints : dict The user-defined constraints. @@ -507,311 +193,16 @@ def _assemble_constraints_cobyla(constraints): return cons_cobyla -<<<<<<< HEAD -<<<<<<< HEAD -def _get_params_bayesian(net, constraints): -<<<<<<< HEAD -<<<<<<< HEAD - """Assembles constraints & initial parameters as required by gp_minimize. - ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - """Assembles constraints & initial parameters as required by gp_minimize. -======= -======= -def _assemble_constraints_bayesian(set_params, constraints): ->>>>>>> e101c24 (Draft opt class and functions based on comments) - """Assembles constraints in format required by gp_minimize. ->>>>>>> 672ce00 (Address comments for more generalized routine) -======= def _update_params(initial_params, predicted_params): """ Update param_dict with predicted parameters. 
->>>>>>> 50186cb (Clean up optimize evoked and example) - ->>>>>>> 2f308f8 (added pep8 formatting) - Parameters - ---------- -<<<<<<< HEAD -<<<<<<< HEAD - net : the Network object - constraints : the user-defined constraints -<<<<<<< HEAD -<<<<<<< HEAD - - Returns - ------- - init_params : list - [drive_name_mu, - drive_name_sigma, - drive_name_ampa_target, - drive_name_nmda_target] -======= - - Returns - ------- - init_params : list - [{drive_name}_mu, {drive_name}_sigma, {drive_name}_ampa_target, {drive_name}_nmda_target] ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - - Returns - ------- - init_params : list - [drive_name_mu, - drive_name_sigma, - drive_name_ampa_target, - drive_name_nmda_target] ->>>>>>> 2f308f8 (added pep8 formatting) - cons : list - [Real(name='{drive_name}_mu', low=x, high=x), - Real(name='{drive_name}_sigma', low=x, high=x), - Real(name='{drive_name}_ampa_target', low=x, high=x)] - Real(name='{drive_name}_nmda_target', low=x0.01, high=x)] - """ -<<<<<<< HEAD -<<<<<<< HEAD - - init_params = list() - cons = list() - for drive_name in net.external_drives.keys(): - for cons_key in constraints[drive_name]: - if cons_key == 'mu': - cons.append(Real(name=f'{drive_name}_{cons_key}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.external_drives[drive_name]['dynamics']['mu']) - elif cons_key == 'sigma': - cons.append(Real(name=f'{drive_name}_{cons_key}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.external_drives[drive_name]['dynamics']['sigma']) - elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - target_type = net.connectivity[conn_idx]['target_type'] - target_receptor = net.connectivity[conn_idx]['receptor'] - cons.append( - Real(name=f'{drive_name}_{target_receptor}_{target_type}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.connectivity[conn_idx]['nc_dict']['A_weight']) - return init_params, cons - - -# gets called only once -def _get_params_cobyla(net, constraints): - """Assembles constraints & initial parameters as required by fmin_cobyla. 
- - Returns -======= - -======= - ->>>>>>> 2f308f8 (added pep8 formatting) - init_params = list() - cons = list() - for drive_name in net.external_drives.keys(): - for cons_key in constraints[drive_name]: - if cons_key == 'mu': - cons.append(Real(name=f'{drive_name}_{cons_key}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.external_drives[drive_name]['dynamics']['mu']) - elif cons_key == 'sigma': - cons.append(Real(name=f'{drive_name}_{cons_key}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.external_drives[drive_name]['dynamics']['sigma']) - elif cons_key == 'weights': - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - target_type = net.connectivity[conn_idx]['target_type'] - target_receptor = net.connectivity[conn_idx]['receptor'] - cons.append( - Real(name=f'{drive_name}_{target_receptor}_{target_type}', - low=constraints[drive_name][cons_key][0], - high=constraints[drive_name][cons_key][1])) - init_params.append( - net.connectivity[conn_idx]['nc_dict']['A_weight']) - return init_params, cons - - -# gets called only once -def _get_params_cobyla(net, constraints): -<<<<<<< HEAD - """Assembles constraints & initial parameters as required by fmin_cobyla. - - Returns ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - """Assembles constraints & initial parameters as required by fmin_cobyla. - - Returns ->>>>>>> 2f308f8 (added pep8 formatting) - ------- - init_params : list - cons : list - """ -<<<<<<< HEAD -<<<<<<< HEAD -======= - net : Network - The network object. - constraints : list of lists - Constraints for each parameter to be optimized ([min, max]). - - Returns - ------- - params : dictionary - Contains parameter names, initial parameters, and constraints. - - """ - # get initial params - params = _get_params(net, constraints) - - # assemble constraints in solver-specific format - cons_bayesian = list() - for cons in params['constraints']: - cons_bayesian.append((cons[0], cons[1])) - params.update({'constraints': cons_bayesian}) - return params - - -def _get_params_cobyla(net, constraints): - """Assembles constraints in format required by fmin_cobyla. Parameters ---------- - net : Network - The network object. - constraints : list of lists - Constraints for each parameter to be optimized ([min, max]). - - Returns - ------- - params : dictionary - Contains parameter names, initial parameters, and constraints. - - """ - # get initial params - params = _get_params(net, constraints) ->>>>>>> 1a7e98b (Address comments for more generalized routine) - - # assemble constraints in solver-specific format - cons_cobyla = list() - for cons_idx, cons_val in enumerate(params['constraints']): - cons_cobyla.append(lambda x: - params['constraints'][cons_idx][1] - x[cons_idx]) - cons_cobyla.append(lambda x: - x[cons_idx] - params['constraints'][cons_idx][0]) - params.update({'constraints': cons_cobyla}) - return params - - -def _run_opt_bayesian(net, params, obj_fun, target_statistic, max_iter): - """Uses gp_minimize optimizer. - - Parameters - ---------- - net : Network - params : dictionary - Contains parameter names, initial parameters, and constraints. - obj_fun : func - The objective function. - target_statistic : Dipole - The target statistic. - max_iter : int - Max number of calls. - - Returns - ------- - opt_params : list - Final parameters. - obj : list - Objective values. 
- net_ : Network - Optimized network object. - """ - def _obj_func(predicted_params): - return obj_fun(net, - params['names'], - target_statistic, - predicted_params) - - opt_results = gp_minimize(func=_obj_func, - dimensions=params['constraints'], - acq_func='EI', - n_calls=max_iter, - x0=params['initial'], - random_state=64) - opt_params = opt_results.x - obj = [np.min(opt_results.func_vals[:i]) for i in range(1, max_iter + 1)] - # get optimized net - net_ = _set_params(net, params['names'], opt_params) - return opt_params, obj, net_ - - -def _run_opt_cobyla(net, params, obj_fun, target_statistic, max_iter): - """Uses fmin_cobyla optimizer. - - Parameters - ---------- - net : Network - params : dictionary - Contains parameter names, initial parameters, and constraints. - obj_fun : func - The objective function. - target_statistic : Dipole - The target statistic. - max_iter : int - Max number of calls. - - Returns - ------- - opt_params : list - Final parameters. - obj : list - Objective values. - net_ : Network - Optimized network object. - """ - def _obj_func(predicted_params): - return obj_fun(net, - params['names'], - target_statistic, - predicted_params) - - opt_results = fmin_cobyla(_obj_func, - cons=params['constraints'], - rhobeg=0.1, - rhoend=1e-4, - x0=params['initial'], - maxfun=max_iter, - catol=0.0) -======= - -======= - ->>>>>>> 2f308f8 (added pep8 formatting) - init_params = list() - cons = list() -======= - net : Network - The network object. - constraints : list of lists - Constraints for each parameter to be optimized ([min, max]). -======= initial_params : dict Keys are parameter names, values are initial parameters. predicted_params : list Parameters selected by the optimizer. ->>>>>>> 50186cb (Clean up optimize evoked and example) Returns ------- @@ -869,32 +260,6 @@ def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun, def _obj_func(predicted_params): return obj_fun(net, -<<<<<<< HEAD - params['names'], -<<<<<<< HEAD - target_statistic, -<<<<<<< HEAD - predicted_params) ->>>>>>> 672ce00 (Address comments for more generalized routine) -======= - predicted_params, -<<<<<<< HEAD - compute_psd) ->>>>>>> 18ac228 (added methods to remove 1/f and compute psd) -======= - f_bands, - weights, - _set_params) ->>>>>>> a4f67f6 (add optimize rhythmic function) -======= - set_params, - predicted_params, - scaling, - target_statistic, - f_bands, - weights) ->>>>>>> e101c24 (Draft opt class and functions based on comments) -======= initial_params, set_params, predicted_params, @@ -903,12 +268,7 @@ def _obj_func(predicted_params): scale_factor, smooth_window_len, tstop, -<<<<<<< HEAD - target_statistic) ->>>>>>> 50186cb (Clean up optimize evoked and example) -======= target) ->>>>>>> 46f1268 (Add tests and address comments) opt_results = gp_minimize(func=_obj_func, dimensions=constraints, @@ -984,44 +344,12 @@ def _obj_func(predicted_params): target) opt_results = fmin_cobyla(_obj_func, -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - cons = cons, - rhobeg = 0.1, - rhoend = 1e-4, - x0 = init_params, - maxfun = max_iter, - catol = 0.0) ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - cons=cons, -======= - cons=params['constraints'], ->>>>>>> 672ce00 (Address comments for more generalized routine) -======= cons=constraints, ->>>>>>> 50186cb (Clean up optimize evoked and example) rhobeg=0.1, rhoend=1e-4, x0=list(initial_params.values()), maxfun=max_iter, catol=0.0) -<<<<<<< HEAD ->>>>>>> 2f308f8 (added pep8 formatting) - opt_params = 
opt_results -<<<<<<< HEAD -<<<<<<< HEAD - # get net_ - # ... - error_values = list() - return opt_params, error_values -<<<<<<< HEAD -<<<<<<< HEAD - -======= - obj = list() -======= # get optimized params opt_params = opt_results @@ -1029,513 +357,9 @@ def _obj_func(predicted_params): # get objective values obj = [np.min(obj_values[:i]) for i in range(1, max_iter + 1)] ->>>>>>> 50186cb (Clean up optimize evoked and example) # get optimized net param_dict = _update_params(initial_params, opt_params) net_ = net.copy() set_params(net_, param_dict) return opt_params, obj, net_ -<<<<<<< HEAD ->>>>>>> 672ce00 (Address comments for more generalized routine) - -<<<<<<< HEAD - -<<<<<<< HEAD -======= - -def _get_fixed_params(net): - """Gets fixed params (we need this function bc we have to remove and reset each drive). - ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - -======= - obj = list() - # get optimized net - net_ = _set_params(net, params['names'], opt_params) - return opt_params, obj, net_ ->>>>>>> 1a7e98b (Address comments for more generalized routine) - - -<<<<<<< HEAD ->>>>>>> 2f308f8 (added pep8 formatting) - Returns - ------- - {'drive_name': {'numspikes': x, - 'location': 'proximal', - 'n_drive_cells': 'n_cells', - 'cell_specific': True, - 'space_constant': {'L2_pyramidal': x, - 'L2_basket': x, - 'L5_basket': x, - 'L5_pyramidal': x}, - 'synaptic_delays': {'L5_basket': x, - 'L5_pyramidal': x, - 'L2_basket': x, - 'L2_pyramidal': x}, - 'probability': {'L5_basket': x, - 'L5_pyramidal': x, - 'L2_basket': x, - 'L2_pyramidal': x}, - 'event_seed': x, - 'conn_seed': x}} - """ -<<<<<<< HEAD -<<<<<<< HEAD - - fixed_params = dict() - for drive_name in net.external_drives.keys(): - drive = net.external_drives[drive_name] - conn_idxs = pick_connection(net, src_gids=drive_name) -======= - - fixed_params = dict() - for drive_name in net.external_drives.keys(): - drive = net.external_drives[drive_name] - conn_idxs = pick_connection(net, src_gids = drive_name) ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - - fixed_params = dict() - for drive_name in net.external_drives.keys(): - drive = net.external_drives[drive_name] - conn_idxs = pick_connection(net, src_gids=drive_name) ->>>>>>> 2f308f8 (added pep8 formatting) - delays = dict() - probabilities = dict() - conn_fixed_params = dict() - for conn_idx in conn_idxs: - target_type = net.connectivity[conn_idx]['target_type'] - delay = net.connectivity[conn_idx]['nc_dict']['A_delay'] - space_constant = net.connectivity[conn_idx]['nc_dict']['lamtha'] - probability = net.connectivity[conn_idx]['probability'] - delays.update({f'{target_type}': delay}) -<<<<<<< HEAD -<<<<<<< HEAD - probabilities.update({f'{target_type}': probability}) -======= - probabilities.update({f'{target_type}': probability}) ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - probabilities.update({f'{target_type}': probability}) ->>>>>>> 2f308f8 (added pep8 formatting) - conn_fixed_params.update({'numspikes': drive['dynamics']['numspikes']}) - conn_fixed_params.update({'location': drive['location']}) - if drive['cell_specific']: - conn_fixed_params.update({'n_drive_cells': 'n_cells'}) - else: - conn_fixed_params.update({'n_drive_cells': drive['n_drive_cells']}) - conn_fixed_params.update({'cell_specific': drive['cell_specific']}) - conn_fixed_params.update({'space_constant': space_constant}) - conn_fixed_params.update({'synaptic_delays': delays}) - conn_fixed_params.update({'probability': probabilities}) 
- conn_fixed_params.update({'event_seed': drive['event_seed']}) - conn_fixed_params.update({'conn_seed': drive['conn_seed']}) - fixed_params.update({drive_name: conn_fixed_params}) - return fixed_params - -<<<<<<< HEAD -<<<<<<< HEAD - -def _get_predicted_params(net, **params): - """Assembles the parameters to be passed to the simulation. - -======= -def _get_predicted_params(net, **params): - """Assembles the parameters to be passed to the simulation. - ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - -def _get_predicted_params(net, **params): - """Assembles the parameters to be passed to the simulation. - ->>>>>>> 2f308f8 (added pep8 formatting) - Returns - ------- - predicted_params : a dictionary () - {'drive_name': {'mu': x, - 'sigma': x, - 'ampa_weights': {'L5_basket': x, - 'L2_basket': x, - 'L5_pyramidal': x, - 'L2_pyramidal': x}, - 'nmda_weights': {'L5_basket': x, - 'L2_basket': x, - 'L5_pyramidal': x, - 'L2_pyramidal': x} - } - """ -<<<<<<< HEAD -<<<<<<< HEAD - -======= - ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - ->>>>>>> 2f308f8 (added pep8 formatting) - predicted_params = dict() - for drive_name in net.external_drives.keys(): - drive_predicted_params = dict() - drive_predicted_params.update({'mu': params[f'{drive_name}_mu']}) - drive_predicted_params.update({'sigma': params[f'{drive_name}_sigma']}) - ampa_weights = dict() - nmda_weights = dict() -<<<<<<< HEAD -<<<<<<< HEAD - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: - target_type = net.connectivity[conn_idx]['target_type'] - target_receptor = net.connectivity[conn_idx]['receptor'] - if target_receptor == 'ampa': - ampa_weights.update( - {target_type: params[f'{drive_name}_ampa_{target_type}']}) - elif target_receptor == 'nmda': - nmda_weights.update( - {target_type: params[f'{drive_name}_nmda_{target_type}']}) - drive_predicted_params.update({'ampa_weights': ampa_weights}) - drive_predicted_params.update({'nmda_weights': nmda_weights}) - predicted_params.update({drive_name: drive_predicted_params}) - return predicted_params - - -def _set_params(net, fixed_params, predicted_params): -======= -def _set_params(net, param_names, predicted_params): ->>>>>>> 672ce00 (Address comments for more generalized routine) - """Sets the network parameters. - - Parameters - ---------- - net : Network - param_names : dictionary - Parameters to change. - predicted_params : list - The parameters selected by the optimizer. - -======= - conn_idxs = pick_connection(net, src_gids = drive_name) - for conn_idx in conn_idxs: -======= - conn_idxs = pick_connection(net, src_gids=drive_name) - for conn_idx in conn_idxs: ->>>>>>> 2f308f8 (added pep8 formatting) - target_type = net.connectivity[conn_idx]['target_type'] - target_receptor = net.connectivity[conn_idx]['receptor'] - if target_receptor == 'ampa': - ampa_weights.update( - {target_type: params[f'{drive_name}_ampa_{target_type}']}) - elif target_receptor == 'nmda': - nmda_weights.update( - {target_type: params[f'{drive_name}_nmda_{target_type}']}) - drive_predicted_params.update({'ampa_weights': ampa_weights}) - drive_predicted_params.update({'nmda_weights': nmda_weights}) - predicted_params.update({drive_name: drive_predicted_params}) - return predicted_params - - -def _set_params(net, fixed_params, predicted_params): -======= -def _set_params(net, param_names, predicted_params): ->>>>>>> 1a7e98b (Address comments for more generalized routine) - """Sets the network parameters. 
- - Parameters - ---------- -<<<<<<< HEAD - net : the Network object - fixed_params : unchanging network parameters - predicted_params : the parameters predicted by the optimizer -<<<<<<< HEAD - ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= -======= - net : Network - param_names : dictionary - Parameters to change. - predicted_params : list - The parameters selected by the optimizer. ->>>>>>> 1a7e98b (Address comments for more generalized routine) - ->>>>>>> 2f308f8 (added pep8 formatting) - Returns - ------- - net : Network - """ -<<<<<<< HEAD -<<<<<<< HEAD - - net_new = net.copy() - - # get net drive names - count = 0 - drive_names = [key for key in net_new.external_drives.keys()] - for drive_name in param_names: - - # set relevant params - if drive_name in drive_names: - for param_name in param_names[drive_name]: - if param_name in ('mu', 'sigma'): - net_new.external_drives[drive_name]['dynamics']\ - [param_name] = predicted_params[count] - count += 1 - - elif param_name in ('ampa', 'nmda'): - conn_idxs = pick_connection(net_new, src_gids=drive_name) - for conn_idx in conn_idxs: - target_receptor = net_new.connectivity[conn_idx]\ - ['receptor'] - if target_receptor == param_name: - net_new.connectivity[conn_idx]['nc_dict']\ - ['A_weight'] = predicted_params[count] - count += 1 - return net_new - - -# These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, param_names, target_statistic, predicted_params): - """The objective function for evoked responses. - - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : Dipole - The recorded dipole. - predicted_params : list - Parameters selected by the optimizer. - - Returns - ------- - rmse : normalized RMSE between recorded and simulated dipole -======= - -======= - ->>>>>>> 2f308f8 (added pep8 formatting) - net_new = net.copy() - - # get net drive names - count = 0 - drive_names = [key for key in net_new.external_drives.keys()] - for drive_name in param_names: - - # set relevant params - if drive_name in drive_names: - for param_name in param_names[drive_name]: - if param_name in ('mu', 'sigma', 'tstart', 'burst_rate', - 'burst_std'): - net_new.external_drives[drive_name]['dynamics']\ - [param_name] = predicted_params[count] - count += 1 - elif param_name in ('ampa', 'nmda'): - conn_idxs = pick_connection(net_new, src_gids=drive_name) - for conn_idx in conn_idxs: - target_receptor = net_new.connectivity[conn_idx]\ - ['receptor'] - if target_receptor == param_name: - net_new.connectivity[conn_idx]['nc_dict']\ - ['A_weight'] = predicted_params[count] - count += 1 - return net_new -<<<<<<< HEAD - - -# These 2 functions will go in a file called metrics.py -def _rmse_evoked(net, param_names, target_statistic, predicted_params): - """The objective function for evoked responses. - - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : Dipole - The recorded dipole. - predicted_params : list - Parameters selected by the optimizer. 
- - Returns - ------- -<<<<<<< HEAD - rmse : normalized root mean squared error between recorded and simulated dipole ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - rmse : normalized RMSE between recorded and simulated dipole ->>>>>>> 2f308f8 (added pep8 formatting) - """ - -<<<<<<< HEAD -<<<<<<< HEAD - fixed_params = _get_fixed_params(net) - # get predicted params -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 2f308f8 (added pep8 formatting) - # gp_minimize & fmin_cobyla return params in different formats - # (bc we are using gp_minimize w/ named args), - # we will probably need 2 different _get_predicted_params functions - predicted_params = _get_predicted_params(net, **params) - # get network with predicted params -<<<<<<< HEAD - new_net = _set_params(net, fixed_params, predicted_params) - # simulate dipole - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] -======= - # gp_minimize & fmin_cobyla return predicted parameters that are formatted differently (bc we are using gp_minimize w/ named args), - # we will probably need 2 different _get_predicted_params functions - predicted_params = _get_predicted_params(net, **params) - # get network with predicted params - new_net = _set_params(net, fixed_params, predicted_params) - # simulate dipole - dpl = simulate_dipole(new_net, tstop = 100, n_trials = 1)[0] ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= - new_net = _set_params(net, fixed_params, predicted_params) - # simulate dipole - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] ->>>>>>> 2f308f8 (added pep8 formatting) - # smooth & scale - if window_len: - dpl.smooth(window_len) - scaling_factor = target_statistic.scale -<<<<<<< HEAD -<<<<<<< HEAD - dpl.scale(scaling_factor) -======= - # get network with predicted params - new_net = _set_params(net, param_names, predicted_params) - # simulate dipole - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] - - # smooth & scale - # if dpl.smooth(window_len): - # dpl_smooth = dpl.copy().smooth(window_len) - dpl.smooth(30) - # scaling_factor = get from target_statistic - # dpl.scale(scaling_factor) - ->>>>>>> 1a7e98b (Address comments for more generalized routine) - # calculate error - rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() - / len(dpl.times)) / (max(target_statistic.data['agg']) - - min(target_statistic.data['agg'])) - return rmse - - -def _rmse_rhythmic(net, param_names, target_statistic, predicted_params): - """The objective function for evoked responses. - - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : Dipole - The recorded dipole. - predicted_params : list - Parameters selected by the optimizer. 
- - Returns - ------- - rmse : norm - """ - - from scipy import signal - - # expose these - fmin = 0.0 - fmax = 200.0 - - new_net = _set_params(net, param_names, predicted_params) - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] - - f_target, psd_target = signal.periodogram(target_statistic.data['agg']) - f_simulated, psd_simulated = signal.periodogram(dpl.data['agg']) - - rmse = np.linalg.norm(psd_target - psd_simulated) - return rmse - - -def _rmse_poisson(): - return -======= -======= - dpl.scale(scaling_factor) ->>>>>>> 2f308f8 (added pep8 formatting) -======= - # get network with predicted params - new_net = _set_params(net, param_names, predicted_params) - # simulate dipole - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] - - # smooth & scale - # if dpl.smooth(window_len): - # dpl_smooth = dpl.copy().smooth(window_len) - dpl.smooth(30) - # scaling_factor = get from target_statistic - # dpl.scale(scaling_factor) - ->>>>>>> 672ce00 (Address comments for more generalized routine) - # calculate error - rmse = np.sqrt(((dpl.data['agg'] - target_statistic.data['agg'])**2).sum() - / len(dpl.times)) / (max(target_statistic.data['agg']) - - min(target_statistic.data['agg'])) - return rmse - - -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> df86a5d (Draft opt class and functions based on comments) -======= -def _rmse_rhythmic(): -======= -def _rmse_rhythmic(net, param_names, target_statistic, predicted_params): - """The objective function for evoked responses. - - Parameters - ----------- - net : Network - param_names : dictionary - Parameters to change. - target_statistic : Dipole - The recorded dipole. - predicted_params : list - Parameters selected by the optimizer. - - Returns - ------- - rmse : norm - """ - - from scipy import signal - - # expose these - fmin = 0.0 - fmax = 200.0 - - new_net = _set_params(net, param_names, predicted_params) - dpl = simulate_dipole(new_net, tstop=100, n_trials=1)[0] - - f_target, psd_target = signal.periodogram(target_statistic.data['agg']) - f_simulated, psd_simulated = signal.periodogram(dpl.data['agg']) - - rmse = np.linalg.norm(psd_target - psd_simulated) - return rmse - - -def _rmse_poisson(): ->>>>>>> 672ce00 (Address comments for more generalized routine) - return ->>>>>>> 2f308f8 (added pep8 formatting) -======= ->>>>>>> 18ac228 (added methods to remove 1/f and compute psd) -======= ->>>>>>> e101c24 (Draft opt class and functions based on comments) -======= ->>>>>>> 9131299 (Address comments for more generalized routine) From fe1a3fe22c60050e1607df9119966dff3f9ae13b Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 01:38:28 -0400 Subject: [PATCH 21/63] put files in right path, address comments --- examples/howto/optimize_evoked.py | 194 +++++++++++------- hnn_core/__init__.py | 1 + hnn_core/opt_toy_example/optimize_evoked.py | 164 --------------- hnn_core/optimization/__init__.py | 1 + .../metrics.py | 16 +- .../general.py => optimization/optimizer.py} | 114 +++++----- .../test_optimize_evoked.py | 20 +- 7 files changed, 211 insertions(+), 299 deletions(-) delete mode 100644 hnn_core/opt_toy_example/optimize_evoked.py create mode 100644 hnn_core/optimization/__init__.py rename hnn_core/{opt_toy_example => optimization}/metrics.py (81%) rename hnn_core/{opt_toy_example/general.py => optimization/optimizer.py} (79%) rename hnn_core/{opt_toy_example => tests}/test_optimize_evoked.py (81%) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index 04c7de45f..c2f0a3ab1 100644 --- 
a/examples/howto/optimize_evoked.py
+++ b/examples/howto/optimize_evoked.py
@@ -7,26 +7,26 @@
 of the model simulation to match an experimental dipole waveform.
 """
 
-# Authors: Blake Caldwell 
+# Authors: Carolina Fernandez 
+#          Nick Tolley 
+#          Ryan Thorpe 
 #          Mainak Jas 
 
 import os.path as op
-import numpy as np
 
 import matplotlib.pyplot as plt
 
 ###############################################################################
 # Let us import hnn_core
 
 import hnn_core
-from hnn_core import (MPIBackend, jones_2009_model, read_params,
-                      simulate_dipole, read_dipole)
-
+from hnn_core import (MPIBackend, jones_2009_model, simulate_dipole,
+                      read_dipole)
 hnn_core_root = op.join(op.dirname(hnn_core.__file__))
 
 # The number of cores may need modifying depending on your current machine.
-n_procs = 10
+n_procs = 2
 
 ###############################################################################
 # First, we will load experimental data into a Dipole object.
@@ -34,6 +34,7 @@
 # This is a different experiment than the one to which the base parameters were
 # tuned. So, the initial RMSE will be large, giving the optimization procedure
 # a lot to work with.
+
 from urllib.request import urlretrieve
 
 data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/master/'
@@ -41,83 +42,134 @@
 urlretrieve(data_url, 'S1_SupraT.txt')
 exp_dpl = read_dipole('S1_SupraT.txt')
 
-###############################################################################
-# Read the base parameters from a file
-params_fname = op.join(hnn_core_root, 'param', 'default.json')
-params = read_params(params_fname)
-
-###############################################################################
-# Let's first simulate the dipole with some initial parameters. The parameter
-# definitions also contain the drives. Even though we could add drives
-# explicitly through our API
-# (see :ref:`sphx_glr_auto_examples_workflows_plot_simulate_evoked.py`),
-# for conciseness,
-# we add them automatically from the parameter files
-
-scale_factor = 3000.
-smooth_window_len = 30.
-tstop = exp_dpl.times[-1]
-net = jones_2009_model(params=params, add_drives_from_params=True)
-with MPIBackend(n_procs=n_procs):
-    print("Running simulation with initial parameters")
-    initial_dpl = simulate_dipole(net, tstop=tstop, n_trials=1)[0]
-    initial_dpl = initial_dpl.scale(scale_factor).smooth(smooth_window_len)
-
 ###############################################################################
 # Now we start the optimization!
+#
+# First, we define a function that will tell the optimization routine how to
+# modify the network drive parameters. The function will take in the Network
+# object with no attached drives, and a dictionary of the parameters we wish to
+# optimize.
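Concretely, the dictionary this function receives is rebuilt on every iteration by pairing the constraint keys with the optimizer's current guesses, which is the role `_update_params` plays inside the routine. A toy sketch of that mapping, with illustrative values:

# Sampled values map back onto constraint keys in order; this mirrors
# _update_params in the optimizer, with made-up numbers.
constraints = {'evprox1_mu': (5., 50.), 'evprox1_sigma': (2., 25.)}
predicted_params = [18.3, 7.9]  # one value per constrained parameter
param_dict = dict(zip(constraints.keys(), predicted_params))
print(param_dict)  # {'evprox1_mu': 18.3, 'evprox1_sigma': 7.9}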
+
+
+def set_params(net, params):
+
+    # Proximal 1
+    weights_ampa_p1 = {'L2_basket':
+                       params['evprox1_ampa_L2_basket'],
+                       'L2_pyramidal':
+                       params['evprox1_ampa_L2_pyramidal'],
+                       'L5_basket':
+                       params['evprox1_ampa_L5_basket'],
+                       'L5_pyramidal':
+                       params['evprox1_ampa_L5_pyramidal']}
+    synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
+                         'L5_basket': 1., 'L5_pyramidal': 1.}
+
+    net.add_evoked_drive('evprox1',
+                         mu=params['evprox1_mu'],
+                         sigma=params['evprox1_sigma'],
+                         numspikes=1,
+                         location='proximal',
+                         weights_ampa=weights_ampa_p1,
+                         synaptic_delays=synaptic_delays_p)
+
+    # Distal
+    weights_ampa_d1 = {'L2_basket':
+                       params['evdist1_ampa_L2_basket'],
+                       'L2_pyramidal':
+                       params['evdist1_ampa_L2_pyramidal'],
+                       'L5_pyramidal':
+                       params['evdist1_ampa_L5_pyramidal']}
+    weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317,
+                       'L5_pyramidal': 0.080074}
+    synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
+                          'L5_pyramidal': 0.1}
+
+    net.add_evoked_drive('evdist1',
+                         mu=params['evdist1_mu'],
+                         sigma=params['evdist1_sigma'],
+                         numspikes=1,
+                         location='distal',
+                         weights_ampa=weights_ampa_d1,
+                         weights_nmda=weights_nmda_d1,
+                         synaptic_delays=synaptic_delays_d1)
+
+    # Proximal 2
+    weights_ampa_p2 = {'L2_basket':
+                       params['evprox2_ampa_L2_basket'],
+                       'L2_pyramidal':
+                       params['evprox2_ampa_L2_pyramidal'],
+                       'L5_basket':
+                       params['evprox2_ampa_L5_basket'],
+                       'L5_pyramidal':
+                       params['evprox2_ampa_L5_pyramidal']}
+
+    net.add_evoked_drive('evprox2',
+                         mu=params['evprox2_mu'],
+                         sigma=params['evprox2_sigma'],
+                         numspikes=1,
+                         location='proximal',
+                         weights_ampa=weights_ampa_p2,
+                         synaptic_delays=synaptic_delays_p)
 
###############################################################################
# Then, we define the constraints.
#
# The constraints must be a dictionary of tuples where the first value in each
# tuple is the lower bound and the second value is the upper bound for the
# corresponding parameter.
+
+
+constraints = dict()
+constraints.update({'evprox1_ampa_L2_basket': (0.01, 1.),
+                    'evprox1_ampa_L2_pyramidal': (0.01, 1.),
+                    'evprox1_ampa_L5_basket': (0.01, 1.),
+                    'evprox1_ampa_L5_pyramidal': (0.01, 1.),
+                    'evprox1_mu': (5., 50.),
+                    'evprox1_sigma': (2., 25.),
+                    'evdist1_ampa_L2_basket': (0.01, 1.),
+                    'evdist1_ampa_L2_pyramidal': (0.01, 1.),
+                    'evdist1_ampa_L5_pyramidal': (0.01, 1.),
+                    'evdist1_mu': (50., 80.),
+                    'evdist1_sigma': (2., 25.),
+                    'evprox2_ampa_L2_basket': (0.01, 1.),
+                    'evprox2_ampa_L2_pyramidal': (0.01, 1.),
+                    'evprox2_ampa_L5_basket': (0.01, 1.),
+                    'evprox2_ampa_L5_pyramidal': (0.01, 1.),
+                    'evprox2_mu': (125., 150.),
+                    'evprox2_sigma': (10., 60.)})
 
-with MPIBackend(n_procs=n_procs):
-    net_opt = optimize_evoked(net, tstop=tstop, n_trials=1,
-                              target_dpl=exp_dpl, initial_dpl=initial_dpl,
-                              scale_factor=scale_factor,
-                              smooth_window_len=smooth_window_len)
 
###############################################################################
-# Now, let's simulate the dipole with the optimized drive parameters.
-with MPIBackend(n_procs=n_procs):
-    best_dpl = simulate_dipole(net_opt, tstop=tstop, n_trials=1)[0]
-    best_dpl = best_dpl.scale(scale_factor).smooth(smooth_window_len)
+# Now we define and fit the optimizer.
 
-###############################################################################
-# Then, we can plot the pre- and post-optimization simulations alongside the
-# experimental data.
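For orientation, the two solvers consume these (min, max) pairs differently: `gp_minimize` takes them directly as search dimensions, while `fmin_cobyla` needs one inequality function per bound that must stay non-negative inside the feasible region. A reduced sketch of both conversions, mirroring `_assemble_constraints_bayesian` and `_assemble_constraints_cobyla`; note that the dictionary key gets the same default-argument binding as the index, the closure pitfall discussed earlier:

constraints = {'evprox1_mu': (5., 50.), 'evprox1_sigma': (2., 25.)}

# Bayesian: gp_minimize accepts the (low, high) tuples as-is.
cons_bayesian = list(constraints.values())

# COBYLA: one function per bound, non-negative when the bound holds.
cons_cobyla = []
for idx, key in enumerate(constraints):
    cons_cobyla.append(lambda x, idx=idx, key=key:
                       constraints[key][1] - x[idx])   # upper bound
    cons_cobyla.append(lambda x, idx=idx, key=key:
                       x[idx] - constraints[key][0])   # lower bound

print(cons_bayesian)               # [(5.0, 50.0), (2.0, 25.0)]
print(cons_cobyla[0]([10., 10.]))  # 40.0, inside the upper bound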
+from hnn_core import Optimizer
 
 tstop = exp_dpl.times[-1]
 scale_factor = 3000
 smooth_window_len = 30
 
-exp_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:blue')
-initial_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:orange')
-best_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:green')
-axes[0].legend(['experimental', 'initial', 'optimized'])
-net_opt.cell_response.plot_spikes_hist(ax=axes[1])
+net = jones_2009_model()
+optim = Optimizer(net, constraints=constraints, set_params=set_params,
+                  solver='bayesian', obj_fun='evoked', tstop=tstop,
+                  scale_factor=scale_factor,
+                  smooth_window_len=smooth_window_len)
+optim.fit(exp_dpl.data['agg'])
 
 ###############################################################################
-# Finally, let's explore which parameters were changed to cause the
-# improved dipole fit.
-# As an example, we will look at the new dynamics of the first proximal drive,
-# as well as the synaptic weight of its layer 2/3 pyramidal AMPA receptors.
-
-from hnn_core.network import pick_connection
+# Finally, we can plot the experimental data alongside the post-optimization
+# simulation dipole as well as the convergence plot.
 
-dynamics_opt = net_opt.external_drives['evdist1']['dynamics']
+with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'):
+    opt_dpl = simulate_dipole(optim.net_, tstop=tstop, n_trials=1)[0]
+opt_dpl.scale(scale_factor)
+opt_dpl.smooth(smooth_window_len)
 
-conn_indices = pick_connection(net=net_opt, src_gids='evprox1', target_gids='L2_pyramidal',
-                               loc='proximal', receptor='ampa')
-conn_idx = conn_indices[0]
-weight_opt = net_opt.connectivity[conn_idx]
-
-print("Optimized dynamic properties: ", dynamics_opt)
-print("\nOptimized weight: ", weight_opt)
-
-###############################################################################
-# Let's compare to the initial dynamic properties and weight.
-dynamics_initial = net.external_drives['evdist1']['dynamics']
+fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
 
-conn_indices = pick_connection(net=net, src_gids='evprox1', target_gids='L2_pyramidal',
-                               loc='proximal', receptor='ampa')
-conn_idx = conn_indices[0]
-weight_initial = net.connectivity[conn_idx]
+exp_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:blue')
+opt_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:green')
+axes[0].legend(['experimental', 'optimized'])
+optim.net_.cell_response.plot_spikes_hist(ax=axes[1])
 
-print("Initial dynamic properties: ", dynamics_initial)
-print("\nInitial weight: ", weight_initial)
+fig1 = optim.plot_convergence()
diff --git a/hnn_core/__init__.py b/hnn_core/__init__.py
index c17c68a7f..857e0e7e8 100644
--- a/hnn_core/__init__.py
+++ b/hnn_core/__init__.py
@@ -6,5 +6,6 @@
 from .cell_response import CellResponse, read_spikes
 from .cells_default import pyramidal, basket
 from .parallel_backends import MPIBackend, JoblibBackend
+from .optimization.optimizer import Optimizer
 
 __version__ = '0.4.dev0'
diff --git a/hnn_core/opt_toy_example/optimize_evoked.py b/hnn_core/opt_toy_example/optimize_evoked.py
deleted file mode 100644
index 7e4a90bf1..000000000
--- a/hnn_core/opt_toy_example/optimize_evoked.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""
-=================================================
-05. Optimize simulated evoked response parameters
-=================================================
-
-This example demonstrates how to optimize the parameters
-of the model simulation to match an experimental dipole waveform.
-""" - -# Authors: Carolina Fernandez -# Nick Tolley -# Ryan Thorpe -# Mainak Jas - -import os.path as op - -import matplotlib.pyplot as plt - -############################################################################### -# Let us import hnn_core - -import hnn_core -from hnn_core import (MPIBackend, jones_2009_model, simulate_dipole, - read_dipole) - -hnn_core_root = op.join(op.dirname(hnn_core.__file__)) - -# The number of cores may need modifying depending on your current machine. -n_procs = 2 - -############################################################################### -# First, we will load experimental data into Dipole object. -# -# This is a different experiment than the one to which the base parameters were -# tuned. So, the initial RMSE will be large, giving the optimization procedure -# a lot to work with. - -from urllib.request import urlretrieve - -data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/master/' - 'data/MEG_detection_data/S1_SupraT.txt') -urlretrieve(data_url, 'S1_SupraT.txt') -exp_dpl = read_dipole('S1_SupraT.txt') - -############################################################################### -# Now we start the optimization! -# -# First, we define a function that will tell the optimization routine how to -# modify the network drive parameters. The function will take in the Network -# object with no attached drives, and a dictionary of the paramters we wish to -# optimize. - - -def set_params(net, params_dict): - - # Proximal 1 - weights_ampa_p1 = {'L2_basket': params_dict['evprox1_ampa_L2_basket'], - 'L2_pyramidal': params_dict['evprox1_ampa_L2_pyramidal'], - 'L5_basket': params_dict['evprox1_ampa_L5_basket'], - 'L5_pyramidal': params_dict['evprox1_ampa_L5_pyramidal']} - synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, - 'L5_basket': 1., 'L5_pyramidal': 1.} - - net.add_evoked_drive('evprox1', - mu=params_dict['evprox1_mu'], - sigma=params_dict['evprox1_sigma'], - numspikes=1, - location='proximal', - weights_ampa=weights_ampa_p1, - synaptic_delays=synaptic_delays_p) - - # Distal - weights_ampa_d1 = {'L2_basket': params_dict['evdist1_ampa_L2_basket'], - 'L2_pyramidal': params_dict['evdist1_ampa_L2_pyramidal'], - 'L5_pyramidal': params_dict['evdist1_ampa_L5_pyramidal']} - weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317, - 'L5_pyramidal': 0.080074} - synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, - 'L5_pyramidal': 0.1} - - net.add_evoked_drive('evdist1', - mu=params_dict['evdist1_mu'], - sigma=params_dict['evdist1_sigma'], - numspikes=1, - location='distal', - weights_ampa=weights_ampa_d1, - weights_nmda=weights_nmda_d1, - synaptic_delays=synaptic_delays_d1) - - # Proximal 2 - weights_ampa_p2 = {'L2_basket': params_dict['evprox2_ampa_L2_basket'], - 'L2_pyramidal': params_dict['evprox2_ampa_L2_pyramidal'], - 'L5_basket': params_dict['evprox2_ampa_L5_basket'], - 'L5_pyramidal': params_dict['evprox2_ampa_L5_pyramidal']} - - net.add_evoked_drive('evprox2', - mu=params_dict['evprox2_mu'], - sigma=params_dict['evprox2_sigma'], - numspikes=1, - location='proximal', - weights_ampa=weights_ampa_p2, - synaptic_delays=synaptic_delays_p) - -############################################################################### -# Then, we define the constrainst. -# -# The constraints must be a dictionary of tuples where the first value in each -# tuple is the lower bound and the second value is the upper bound for the -# corresponding parameter. 
- - -constraints = dict() -constraints.update({'evprox1_ampa_L2_basket': (0.01, 1.), - 'evprox1_ampa_L2_pyramidal': (0.01, 1.), - 'evprox1_ampa_L5_basket': (0.01, 1.), - 'evprox1_ampa_L5_pyramidal': (0.01, 1.), - 'evprox1_mu': (5., 50.), - 'evprox1_sigma': (2., 25.), - 'evdist1_ampa_L2_basket': (0.01, 1.), - 'evdist1_ampa_L2_pyramidal': (0.01, 1.), - 'evdist1_ampa_L5_pyramidal': (0.01, 1.), - 'evdist1_mu': (50., 80.), - 'evdist1_sigma': (2., 25.), - 'evprox2_ampa_L2_basket': (0.01, 1.), - 'evprox2_ampa_L2_pyramidal': (0.01, 1.), - 'evprox2_ampa_L5_basket': (0.01, 1.), - 'evprox2_ampa_L5_pyramidal': (0.01, 1.), - 'evprox2_mu': (125., 150.), - 'evprox2_sigma': (10., 60.)}) - - -############################################################################### -# Now we define and fit the optimizer. - -from general import Optimizer # change path*** - -tstop = exp_dpl.times[-1] -scale_factor = 3000 -smooth_window_len = 30 - -net = jones_2009_model() -optim = Optimizer(net, constraints=constraints, set_params=set_params, - solver='bayesian', obj_fun='evoked', tstop=tstop, - scale_factor=scale_factor, - smooth_window_len=smooth_window_len) -optim.fit(exp_dpl.data['agg']) - -############################################################################### -# Finally, we can plot the experimental data alongside the post-optimization -# simulation dipole as well as the convergence plot. - -with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): - opt_dpl = simulate_dipole(optim.net_, tstop=tstop, n_trials=1)[0] -opt_dpl.scale(scale_factor) -opt_dpl.smooth(smooth_window_len) - -fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6)) - -exp_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:blue') -opt_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:green') -axes[0].legend(['experimental', 'optimized']) -optim.net_.cell_response.plot_spikes_hist(ax=axes[1]) - -fig1 = optim.plot_convergence() diff --git a/hnn_core/optimization/__init__.py b/hnn_core/optimization/__init__.py new file mode 100644 index 000000000..845f695eb --- /dev/null +++ b/hnn_core/optimization/__init__.py @@ -0,0 +1 @@ +from .metrics import _rmse_evoked \ No newline at end of file diff --git a/hnn_core/opt_toy_example/metrics.py b/hnn_core/optimization/metrics.py similarity index 81% rename from hnn_core/opt_toy_example/metrics.py rename to hnn_core/optimization/metrics.py index e895b4382..dc6b487b4 100644 --- a/hnn_core/opt_toy_example/metrics.py +++ b/hnn_core/optimization/metrics.py @@ -7,16 +7,15 @@ import numpy as np -from hnn_core import simulate_dipole, MPIBackend +from hnn_core import simulate_dipole from scipy.signal import resample def _rmse_evoked(net, initial_params, set_params, predicted_params, update_params, obj_values, scale_factor, smooth_window_len, - tstop, target): - """ - The objective function for evoked responses. + target, tstop): + """The objective function for evoked responses. Parameters ---------- @@ -29,7 +28,7 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, predicted_params : list Parameters selected by the optimizer. update_params : func - Function to update param_dict. + Function to update params. scale_factor : float The dipole scale factor. smooth_window_len : float @@ -45,13 +44,12 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, Normalized RMSE between recorded and simulated dipole. 
""" - param_dict = update_params(initial_params, predicted_params) + params = update_params(initial_params, predicted_params) # simulate dpl with predicted params new_net = net.copy() - set_params(new_net, param_dict) - with MPIBackend(n_procs=2, mpi_cmd='mpiexec'): - dpl = simulate_dipole(new_net, tstop=tstop, n_trials=1)[0] + set_params(new_net, params) + dpl = simulate_dipole(new_net, tstop=tstop, n_trials=1)[0] # smooth & scale dpl.scale(scale_factor) diff --git a/hnn_core/opt_toy_example/general.py b/hnn_core/optimization/optimizer.py similarity index 79% rename from hnn_core/opt_toy_example/general.py rename to hnn_core/optimization/optimizer.py index 5503a7aa6..2072040cb 100644 --- a/hnn_core/opt_toy_example/general.py +++ b/hnn_core/optimization/optimizer.py @@ -7,7 +7,7 @@ import numpy as np -from metrics import _rmse_evoked # change path*** +from optimization import _rmse_evoked from skopt import gp_minimize from scipy.optimize import fmin_cobyla @@ -16,6 +16,29 @@ class Optimizer: def __init__(self, net, constraints, set_params, solver, obj_fun, tstop, scale_factor=1., smooth_window_len=None): + """Parameter optimization. + + Parameters + ---------- + net : Network + The network object. + constraints : dict + The user-defined constraints. + set_params : func + User-defined function that sets parameters in network drives. + solver : string + The optimizer, 'bayesian' or 'cobyla'. + obj_fun : string + The type of drive to be optimized, 'evoked'. + tstop : float + The simulated dipole's duration. + scale_factor : float, optional + The dipole scale factor. The default is 1. + smooth_window_len : float, optional + The smooth window length. The default is None. + + """ + if net.external_drives: raise ValueError("The current Network instance has external " + "drives, provide a Network object with no " + @@ -23,6 +46,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, self.net = net self.constraints = constraints self._set_params = set_params + self.max_iter = 200 # Optimizer method if solver == 'bayesian': self._assemble_constraints = _assemble_constraints_bayesian @@ -41,27 +65,31 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, self.smooth_window_len = smooth_window_len self.tstop = tstop self.net_ = None - self.obj = list() - self.opt_params = None - self.max_iter = 200 + self.obj_ = list() + self.opt_params_ = None def __repr__(self): class_name = self.__class__.__name__ - return class_name + is_fit = False + if self.net_ is not None: + is_fit = True + s = ( + f'{class_name}: ' + f'solver={self.solver}, ' + f'response type={self.obj_fun}, ' + f'has been fit={is_fit}' + ) + + return s def fit(self, target): - """ - Runs optimization routine. + """Runs optimization routine. Parameters ---------- target : ndarray The recorded dipole. - Returns - ------- - None. - """ constraints = self._assemble_constraints(self.constraints) @@ -78,18 +106,16 @@ def fit(self, target): self.max_iter, target) - self.opt_params = opt_params - self.obj = obj self.net_ = net_ - return + self.obj_ = obj + self.opt_params_ = opt_params def plot_convergence(self, ax=None, show=True): - """ - Convergence plot. + """Convergence plot. Parameters ---------- - ax : instance of matplotlib figure, None + ax : instance of matplotlib figure, optional The matplotlib axis. The default is None. show : bool If True, show the figure. The default is True. 
@@ -109,10 +135,10 @@ def plot_convergence(self, ax=None, show=True): axis = ax if isinstance(ax, mpl.axes._axes.Axes) else ax x = list(range(1, self.max_iter + 1)) - y_min = min(self.obj) - 0.01 - y_max = max(self.obj) + 0.01 + y_min = min(self.obj_) - 0.01 + y_max = max(self.obj_) + 0.01 - axis.plot(x, self.obj, color='black') + axis.plot(x, self.obj_, color='black') axis.set_ylim([y_min, y_max]) axis.set_title('Convergence') axis.set_xlabel('Number of calls') @@ -124,8 +150,7 @@ def plot_convergence(self, ax=None, show=True): def _get_initial_params(constraints): - """ - Gets initial parameters as midpoints of parameter ranges. + """Gets initial parameters as midpoints of parameter ranges. Parameters ---------- @@ -148,8 +173,7 @@ def _get_initial_params(constraints): def _assemble_constraints_bayesian(constraints): - """ - Assembles constraints in format required by gp_minimize. + """Assembles constraints in format required by gp_minimize. Parameters ---------- @@ -168,8 +192,7 @@ def _assemble_constraints_bayesian(constraints): def _assemble_constraints_cobyla(constraints): - """ - Assembles constraints in format required by fmin_cobyla. + """Assembles constraints in format required by fmin_cobyla. Parameters ---------- @@ -194,8 +217,7 @@ def _assemble_constraints_cobyla(constraints): def _update_params(initial_params, predicted_params): - """ - Update param_dict with predicted parameters. + """Update params with predicted parameters. Parameters ---------- @@ -206,22 +228,21 @@ def _update_params(initial_params, predicted_params): Returns ------- - params_dict : dict + params : dict Keys are parameter names, values are parameters. """ - param_dict = dict() + params = dict() for param_key, param_name in enumerate(initial_params): - param_dict.update({param_name: predicted_params[param_key]}) + params.update({param_name: predicted_params[param_key]}) - return param_dict + return params def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun, scale_factor, smooth_window_len, tstop, max_iter, target): - """ - Runs optimization routine with gp_minimize optimizer. + """Runs optimization routine with gp_minimize optimizer. Parameters ---------- @@ -267,8 +288,8 @@ def _obj_func(predicted_params): obj_values, scale_factor, smooth_window_len, - tstop, - target) + target, + tstop) opt_results = gp_minimize(func=_obj_func, dimensions=constraints, @@ -280,12 +301,12 @@ def _obj_func(predicted_params): opt_params = opt_results.x # get objective values - obj = [np.min(obj_values[:i]) for i in range(1, max_iter + 1)] + obj = [np.min(obj_values[:idx]) for idx in range(1, max_iter + 1)] # get optimized net - param_dict = _update_params(initial_params, opt_params) + params = _update_params(initial_params, opt_params) net_ = net.copy() - set_params(net_, param_dict) + set_params(net_, params) return opt_params, obj, net_ @@ -293,8 +314,7 @@ def _obj_func(predicted_params): def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun, scale_factor, smooth_window_len, tstop, max_iter, target): - """ - Runs optimization routine with fmin_cobyla optimizer. + """Runs optimization routine with fmin_cobyla optimizer. Parameters ---------- @@ -316,7 +336,7 @@ def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun, The simulated dipole's duration. max_iter : int Number of calls the optimizer makes. - target : ndarray, None + target : ndarray, optional The recorded dipole. The default is None. 
Returns @@ -340,8 +360,8 @@ def _obj_func(predicted_params): obj_values, scale_factor, smooth_window_len, - tstop, - target) + target, + tstop) opt_results = fmin_cobyla(_obj_func, cons=constraints, @@ -355,11 +375,11 @@ def _obj_func(predicted_params): opt_params = opt_results # get objective values - obj = [np.min(obj_values[:i]) for i in range(1, max_iter + 1)] + obj = [np.min(obj_values[:idx]) for idx in range(1, max_iter + 1)] # get optimized net - param_dict = _update_params(initial_params, opt_params) + params = _update_params(initial_params, opt_params) net_ = net.copy() - set_params(net_, param_dict) + set_params(net_, params) return opt_params, obj, net_ diff --git a/hnn_core/opt_toy_example/test_optimize_evoked.py b/hnn_core/tests/test_optimize_evoked.py similarity index 81% rename from hnn_core/opt_toy_example/test_optimize_evoked.py rename to hnn_core/tests/test_optimize_evoked.py index 2b3128ea5..2a151d3f5 100644 --- a/hnn_core/opt_toy_example/test_optimize_evoked.py +++ b/hnn_core/tests/test_optimize_evoked.py @@ -1,7 +1,9 @@ # Authors: Carolina Fernandez +# Nick Tolley +# Ryan Thorpe +# Mainak Jas -from hnn_core import jones_2009_model, simulate_dipole -from general import Optimizer # change path*** +from hnn_core import jones_2009_model, simulate_dipole, Optimizer def _optimize_evoked(solver): @@ -35,7 +37,7 @@ def _optimize_evoked(solver): net_offset._N_pyr_x = 3 net_offset._N_pyr_y = 3 - def set_params(net_offset, param_dict): + def set_params(net_offset, params): weights_ampa = {'L2_basket': 0.5, 'L2_pyramidal': 0.5, 'L5_basket': 0.5, @@ -43,7 +45,7 @@ def set_params(net_offset, param_dict): synaptic_delays = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, 'L5_basket': 1., 'L5_pyramidal': 1.} net_offset.add_evoked_drive('evprox', - mu=param_dict['mu_offset'], + mu=params['mu_offset'], sigma=1, numspikes=1, location='proximal', @@ -60,13 +62,15 @@ def set_params(net_offset, param_dict): obj_fun='evoked', tstop=tstop) optim.fit(dpl_orig.data['agg']) - opt_param = optim.opt_params[0] + opt_param = optim.opt_params_[0] # the optimized parameter is in the range - assert mu_range[0] <= opt_param <= mu_range[1], "Optimized parameter is not in user-defined range" + assert mu_range[0] <= opt_param <= mu_range[1], \ + "Optimized parameter is not in user-defined range" - obj = optim.obj + obj = optim.obj_ # the number of returned rmse values should be the same as max_iter - assert len(obj) <= 200, "Number of rmse values should be the same as max_iter" + assert len(obj) <= 200, \ + "Number of rmse values should be the same as max_iter" # the returned rmse values should be positive assert all(vals > 0 for vals in obj), "rmse values should be positive" From e08509e6c15451ffb7b4c91b4dbbc77bd576ecca Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 01:42:17 -0400 Subject: [PATCH 22/63] change n_procs --- examples/howto/optimize_evoked.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index c2f0a3ab1..e12721459 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -26,7 +26,7 @@ hnn_core_root = op.join(op.dirname(hnn_core.__file__)) # The number of cores may need modifying depending on your current machine. -n_procs = 2 +n_procs = 10 ############################################################################### # First, we will load experimental data into Dipole object. 
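One detail worth spelling out before the cleanup commits: the two solvers consume the user's (min, max) constraints in different shapes, and _assemble_constraints_bayesian and _assemble_constraints_cobyla appear above only through their docstrings. A minimal sketch of the two conversions, assuming constraints maps parameter names to (min, max) tuples as in the example script; the function names here are illustrative stand-ins for the private helpers:

def assemble_bayesian(constraints):
    # gp_minimize takes one (low, high) bound per search dimension.
    return list(constraints.values())


def assemble_cobyla(constraints):
    # fmin_cobyla takes inequality functions that must evaluate >= 0 at
    # any feasible point: two per parameter, closing over index and bounds.
    cons = list()
    for idx, (low, high) in enumerate(constraints.values()):
        cons.append(lambda x, idx=idx, high=high: high - x[idx])
        cons.append(lambda x, idx=idx, low=low: x[idx] - low)
    return cons

Default-argument capture (idx=idx) keeps each lambda bound to its own parameter index; closing over the bare loop variable would leave every constraint pointing at the last parameter.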
From fcd3e8e6a7263c0151baa0aea2f44990895fad98 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 01:46:25 -0400 Subject: [PATCH 23/63] remove blank spaces --- hnn_core/optimization/optimizer.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/hnn_core/optimization/optimizer.py b/hnn_core/optimization/optimizer.py index 2072040cb..00d84e437 100644 --- a/hnn_core/optimization/optimizer.py +++ b/hnn_core/optimization/optimizer.py @@ -36,7 +36,6 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, The dipole scale factor. The default is 1. smooth_window_len : float, optional The smooth window length. The default is None. - """ if net.external_drives: @@ -89,7 +88,6 @@ def fit(self, target): ---------- target : ndarray The recorded dipole. - """ constraints = self._assemble_constraints(self.constraints) @@ -161,7 +159,6 @@ def _get_initial_params(constraints): ------- initial_params : dict Keys are parameter names, values are initial parameters. - """ initial_params = dict() From f107131a2e2e65a97ffe8f85e483edb0c5998c7d Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 13:04:53 -0400 Subject: [PATCH 24/63] address comments --- examples/howto/optimize_evoked.py | 3 ++- hnn_core/__init__.py | 2 +- hnn_core/optimization/__init__.py | 1 - hnn_core/optimization/optimizer.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index e12721459..e2cc0b87b 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -154,7 +154,8 @@ def set_params(net, params): solver='bayesian', obj_fun='evoked', tstop=tstop, scale_factor=scale_factor, smooth_window_len=smooth_window_len) -optim.fit(exp_dpl.data['agg']) +with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): + optim.fit(exp_dpl.data['agg']) ############################################################################### # Finally, we can plot the experimental data alongside the post-optimization diff --git a/hnn_core/__init__.py b/hnn_core/__init__.py index 857e0e7e8..f26d3be6f 100644 --- a/hnn_core/__init__.py +++ b/hnn_core/__init__.py @@ -6,6 +6,6 @@ from .cell_response import CellResponse, read_spikes from .cells_default import pyramidal, basket from .parallel_backends import MPIBackend, JoblibBackend -from optimization.optimizer import Optimizer +from .optimization.optimizer import Optimizer __version__ = '0.4.dev0' diff --git a/hnn_core/optimization/__init__.py b/hnn_core/optimization/__init__.py index 845f695eb..e69de29bb 100644 --- a/hnn_core/optimization/__init__.py +++ b/hnn_core/optimization/__init__.py @@ -1 +0,0 @@ -from .metrics import _rmse_evoked \ No newline at end of file diff --git a/hnn_core/optimization/optimizer.py b/hnn_core/optimization/optimizer.py index 00d84e437..85d730d9b 100644 --- a/hnn_core/optimization/optimizer.py +++ b/hnn_core/optimization/optimizer.py @@ -7,7 +7,7 @@ import numpy as np -from optimization import _rmse_evoked +from .metrics import _rmse_evoked from skopt import gp_minimize from scipy.optimize import fmin_cobyla From c3aee67f40f41070e30b358813b4ccb6a322c2c8 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 13:06:39 -0400 Subject: [PATCH 25/63] move optimization.py file --- hnn_core/optimization.py | 636 --------------------------------------- 1 file changed, 636 deletions(-) delete mode 100644 hnn_core/optimization.py diff --git a/hnn_core/optimization.py b/hnn_core/optimization.py deleted file mode 100644 
index 932be0b16..000000000 --- a/hnn_core/optimization.py +++ /dev/null @@ -1,636 +0,0 @@ -"""Parameter optimization functions.""" - -# Authors: Blake Caldwell -# Mainak Jas - -from math import ceil, floor -from collections import OrderedDict -import fnmatch -from functools import partial - -import numpy as np -import scipy.stats as stats -from scipy.optimize import fmin_cobyla - -from .dipole import simulate_dipole, _rmse, average_dipoles -from .network import pick_connection - - -def _get_range(val, multiplier): - """Get range of values to sweep over.""" - range_min = max(0, val - val * multiplier / 100.) - range_max = val + val * multiplier / 100. - ranges = {'initial': val, 'minval': range_min, 'maxval': range_max} - return ranges - - -def _split_by_evinput(drive_names, drive_dynamics, drive_syn_weights, tstop, - sigma_range_multiplier, timing_range_multiplier, - synweight_range_multiplier): - """ Sorts parameter ranges by evoked inputs into a dictionary - - Parameters - ---------- - drive_names : list of str, shape (n_drives, ) - Names corresponding to n Network drives. - drive_dynamics : list of dict, shape (n_drives, ) - Dynamics parameters for each drive specified in drive_names. - drive_syn_weights : list of dict, shape (n_drives, ) - Synaptic weight parameters for each drive specified in drive_names. - tstop : float - The simulation stop time (ms). - sigma_range_multiplier : float - The scale of sigma values to sweep over. - timing_range_multiplier : float - The scale of timing values to sweep over. - synweight_range_multiplier : float - The scale of input synaptic weights to sweep over. - - Returns - ------- - sorted_evinput_params: dict - Dictionary with parameters grouped by evoked inputs. - Keys for each evoked input are - 'mean', 'sigma', 'ranges', 'start', and 'end'. - sorted_evinput_params['evprox1']['ranges'] is a dict - with keys as the parameters to be optimized and values - indicating the ranges over which they should be - optimized. E.g., - sorted_evinput['evprox1']['ranges'] = - { - 'gbar_evprox_1_L2Pyr_ampa': - { - 'initial': 0.05125, - 'minval': 0, - 'maxval': 0.0915 - } - } - Elements are sorted by their start time. - """ - - evinput_params = {} - for drive_idx, drive_name in enumerate(drive_names): - timing_mean = drive_dynamics[drive_idx]['mu'] - timing_sigma = drive_dynamics[drive_idx]['sigma'] - - if timing_sigma == 0.0: - # sigma of 0 will not produce a CDF - timing_sigma = 0.01 - - evinput_params[drive_name] = {'mean': timing_mean, - 'sigma': timing_sigma, - 'ranges': {}} - - evinput_params[drive_name]['ranges'][f'{drive_name}_sigma'] = \ - _get_range(timing_sigma, sigma_range_multiplier) - - # calculate range for time - timing_bound = timing_sigma * timing_range_multiplier - range_min = max(0, timing_mean - timing_bound) - range_max = min(tstop, timing_mean + timing_bound) - - evinput_params[drive_name]['start'] = range_min - evinput_params[drive_name]['end'] = range_max - evinput_params[drive_name]['ranges'][f'{drive_name}_mu'] = \ - {'initial': timing_mean, 'minval': range_min, 'maxval': range_max} - - # calculate ranges for syn. 
weights - for syn_weight_key in drive_syn_weights[drive_idx]: - new_key = f'{drive_name}_gbar_{syn_weight_key}' - weight = drive_syn_weights[drive_idx][syn_weight_key] - ranges = _get_range(weight, synweight_range_multiplier) - if weight == 0.0: - ranges['minval'] = weight - ranges['maxval'] = 1.0 - evinput_params[drive_name]['ranges'][new_key] = ranges - - sorted_evinput_params = OrderedDict(sorted(evinput_params.items(), - key=lambda x: x[1]['start'])) - return sorted_evinput_params - - -def _generate_weights(evinput_params, tstop, dt, decay_multiplier): - """Calculation of weight function for wRMSE calcuation - - Returns - ------- - evinput_params : dict - Adds the keys 'weights', 'opt_start', 'opt_end' - to evinput_params[input_name] and removes 'mean' - and 'sigma' which were needed to compute 'weights'. - """ - num_step = ceil(tstop / dt) + 1 - times = np.linspace(0, tstop, num_step) - - for evinput_this in evinput_params.values(): - # calculate cdf using start time (minival of optimization range) - evinput_this['cdf'] = stats.norm.cdf( - times, evinput_this['start'], evinput_this['sigma']) - - for input_name, evinput_this in evinput_params.items(): - evinput_this['weights'] = evinput_this['cdf'].copy() - - for other_input, evinput_other in evinput_params.items(): - # check ordering to only use inputs after us - # and don't subtract our own cdf(s) - if (evinput_other['mean'] < evinput_this['mean'] or - input_name == other_input): - continue - - decay_factor = decay_multiplier * \ - (evinput_other['mean'] - evinput_this['mean']) / tstop - evinput_this['weights'] -= evinput_other['cdf'] * decay_factor - - # weights should not drop below 0 - np.clip(evinput_this['weights'], a_min=0, a_max=None, - out=evinput_this['weights']) - - # start and stop optimization where the weights are insignificant - indices = np.where(evinput_this['weights'] > 0.01) - evinput_this['opt_start'] = min(evinput_this['start'], - times[indices][0]) - evinput_this['opt_end'] = max(evinput_this['end'], - times[indices][-1]) - - # convert to multiples of dt - evinput_this['opt_start'] = floor(evinput_this['opt_start'] / dt) * dt - evinput_params[input_name]['opt_end'] = ceil( - evinput_this['opt_end'] / dt) * dt - - for evinput_this in evinput_params.values(): - del evinput_this['mean'], evinput_this['sigma'], evinput_this['cdf'] - - return evinput_params - - -def _create_last_chunk(input_chunks): - """ This creates a chunk that combines parameters for - all chunks in input_chunks (final step) - - Parameters - ---------- - input_chunks: List - List of ordered chunks for optimization - - Returns - ------- - chunk: dict - Dictionary of with parameters for combined - chunk (final step) - """ - chunk = {'inputs': [], 'ranges': {}, 'opt_start': 0.0, - 'opt_end': 0.0} - - for evinput in input_chunks: - chunk['inputs'].extend(evinput['inputs']) - chunk['ranges'].update(evinput['ranges']) - if evinput['opt_end'] > chunk['opt_end']: - chunk['opt_end'] = evinput['opt_end'] - - # wRMSE with weights of 1's is the same as regular RMSE. - chunk['weights'] = np.ones_like(input_chunks[-1]['weights']) - - return chunk - - -def _consolidate_chunks(inputs): - """Consolidates inputs into optimization "chunks" defined by - opt_start and opt_end - - Parameters - ---------- - inputs: dict - Sorted dictionary of inputs with their parameters - and weight functions - - Returns - ------- - chunks: list - Combine the evinput_params whenever the end is overlapping - with the next. 
- """ - chunks = list() - for input_name in inputs: - input_dict = inputs[input_name].copy() - input_dict['inputs'] = [input_name] - - if (len(chunks) > 0 and - input_dict['start'] <= chunks[-1]['end']): - # update previous chunk - chunks[-1]['inputs'].extend(input_dict['inputs']) - chunks[-1]['end'] = input_dict['end'] - chunks[-1]['ranges'].update(input_dict['ranges']) - chunks[-1]['opt_end'] = max(chunks[-1]['opt_end'], - input_dict['opt_end']) - # average the weights - chunks[-1]['weights'] = (chunks[-1]['weights'] + - input_dict['weights']) / 2 - else: - # new chunk - chunks.append(input_dict) - - # add one last chunk to the end - if len(chunks) > 1: - last_chunk = _create_last_chunk(chunks) - chunks.append(last_chunk) - - return chunks - - -def _optrun(drive_params_updated, drive_params_static, net, tstop, dt, - n_trials, opt_params, opt_dpls, scale_factor, smooth_window_len, - return_rmse): - """This is the function to run a simulation - - Parameters - ---------- - drive_params_updated : array-like, shape (n_params, ) - List or numpy array with the parameters chosen by - optimization engine. Order is consistent with - opt_params['ranges']. - drive_params_static : dict - Drive parameters that remain constant throughout optimization. Keys - correspond to the drive names that are being optimized in this chunk. - net : Network instance - Network instance with attached drives. This object will be modified - in-place. - tstop : float - The simulation stop time (ms). - dt : float - The integration time step (ms) of h.CVode during simulation. - n_trials : int - The number of trials to simulate. - opt_params : dict - The optimization parameters. - opt_dpls : dict - Dictionary with keys 'target_dpl' and 'best' for - the experimental dipole and best dipole. - scale_factor : float - Scales the simulated dipoles by scale_factor to match - exp_dpl. - smooth_window_len : int - The length of the hamming window (in samples) to smooth the - simulated dipole waveform in each optimization step. - return_rmse : bool - Returns list of unweighted RMSEs between data in dpl and exp_dpl - for each optimization step - - Returns - ------- - avg_rmse: float - Weighted RMSE between data in dpl and exp_dpl - """ - print("Optimization step %d, iteration %d" % (opt_params['cur_step'] + 1, - opt_params['optiter'] + 1)) - - # match parameter values contained in list to their respective key names - params_dict = dict() - for param_name, test_value in zip(opt_params['ranges'].keys(), - drive_params_updated): - # tiny negative weights are possible. Clip them to 0. 
- if test_value < 0: - test_value = 0 - params_dict[param_name] = test_value - - # modify drives according to the drive names in the current chunk - for drive_name in opt_params['inputs']: - - # clear drive and its connectivity - del net.external_drives[drive_name] - conn_idxs = pick_connection(net, src_gids=drive_name) - net.connectivity = [conn for conn_idx, conn - in enumerate(net.connectivity) - if conn_idx not in conn_idxs] - - # extract syn weights: final weights dicts should have keys that - # correspond to cell types - keys_ampa = fnmatch.filter(params_dict.keys(), - f'{drive_name}_gbar_ampa_*') - keys_nmda = fnmatch.filter(params_dict.keys(), - f'{drive_name}_gbar_nmda_*') - weights_ampa = {key.lstrip(f'{drive_name}_gbar_ampa_'): - params_dict[key] for key in keys_ampa} - weights_nmda = {key.lstrip(f'{drive_name}_gbar_nmda_'): - params_dict[key] for key in keys_nmda} - - net.add_evoked_drive( - name=drive_name, - mu=params_dict[drive_name + '_mu'], - sigma=params_dict[drive_name + '_sigma'], - numspikes=drive_params_static[drive_name]['numspikes'], - location=drive_params_static[drive_name]['location'], - n_drive_cells=drive_params_static[drive_name]['n_drive_cells'], - cell_specific=drive_params_static[drive_name]['cell_specific'], - weights_ampa=weights_ampa, - weights_nmda=weights_nmda, - space_constant=drive_params_static[drive_name]['space_constant'], - synaptic_delays=drive_params_static[drive_name]['synaptic_delays'], - probability=drive_params_static[drive_name]['probability'], - event_seed=drive_params_static[drive_name]['event_seed'], - conn_seed=drive_params_static[drive_name]['conn_seed'] - ) - - # run the simulation - dpls = simulate_dipole(net, tstop=tstop, dt=dt, n_trials=n_trials) - # order of operations: scale, smooth, then average - dpls = [dpl.scale(scale_factor) for dpl in dpls] - if smooth_window_len is not None: - dpls = [dpl.smooth(smooth_window_len) for dpl in dpls] - avg_dpl = average_dipoles(dpls) - - avg_rmse = _rmse(avg_dpl, opt_dpls['target_dpl'], - tstart=opt_params['opt_start'], - tstop=opt_params['opt_end'], - weights=opt_params['weights']) - avg_rmse_unweighted = _rmse(avg_dpl, opt_dpls['target_dpl'], - tstart=opt_params['opt_start'], - tstop=tstop, weights=None) - - if return_rmse: - opt_params['iter_avg_rmse'].append(avg_rmse_unweighted) - opt_params['stepminopterr'] = avg_rmse - opt_dpls['best_dpl'] = avg_dpl - - print("weighted RMSE: %.2e over range [%3.3f-%3.3f] ms" % - (avg_rmse, opt_params['opt_start'], opt_params['opt_end'])) - - opt_params['optiter'] += 1 - - return avg_rmse - - -def _run_optimization(maxiter, param_ranges, optrun): - - cons = list() - x0 = list() - for idx, param_name in enumerate(param_ranges): - x0.append(param_ranges[param_name]['initial']) - cons.append( - lambda x, idx=idx: param_ranges[param_name]['maxval'] - x[idx]) - cons.append( - lambda x, idx=idx: x[idx] - param_ranges[param_name]['minval']) - result = fmin_cobyla(func=optrun, cons=cons, rhobeg=0.1, rhoend=1e-4, - x0=x0, maxfun=maxiter, catol=0.0) - return result - - -def _get_drive_params(net, drive_names): - """Get evoked drive parameters from a Network instance.""" - - drive_dynamics = list() - drive_syn_weights = list() - drive_static_params = dict() - for drive_name in drive_names: - drive = net.external_drives[drive_name] - drive_dynamics.append(drive['dynamics'].copy()) - conn_idxs = pick_connection(net, src_gids=drive_name) - weights = dict() - delays = dict() - probabilities = dict() - for conn_idx in conn_idxs: - target_type = 
net.connectivity[conn_idx]['target_type'] - target_receptor = net.connectivity[conn_idx]['receptor'] - weight = net.connectivity[conn_idx]['nc_dict']['A_weight'] - # note that for each drive, the weights dict should be unnested - # accross target cell types and receptors for ease-of-use when - # these values get restructured into a list downstream - - # legacy_mode hack: don't include invalid connections that have - # been added in Network when legacy_mode=True - if not (drive['location'] == 'distal' and - target_type == 'L5_basket'): - if target_receptor == "ampa": - weights.update({f'ampa_{target_type}': weight}) - if target_receptor == "nmda": - weights.update({f'nmda_{target_type}': weight}) - # delay should be constant across AMPA and NMDA receptor types - delay = net.connectivity[conn_idx]['nc_dict']['A_delay'] - delays.update({target_type: delay}) - # space constant should be constant across drive connections - space_const = net.connectivity[conn_idx]['nc_dict']['lamtha'] - # probability should be constant across AMPA and NMDA receptor - # types - probability = net.connectivity[conn_idx]['probability'] - probabilities.update({target_type: probability}) - - drive_syn_weights.append(weights) - - static_params = dict() - static_params['numspikes'] = drive['dynamics']['numspikes'] - static_params['location'] = drive['location'] - if drive['cell_specific']: - static_params['n_drive_cells'] = 'n_cells' - else: - static_params['n_drive_cells'] = drive['n_drive_cells'] - static_params['cell_specific'] = drive['cell_specific'] - static_params['space_constant'] = space_const - static_params['synaptic_delays'] = delays - static_params['probability'] = probabilities - static_params['event_seed'] = drive['event_seed'] - static_params['conn_seed'] = drive['conn_seed'] - drive_static_params.update({drive_name: static_params}) - - return drive_dynamics, drive_syn_weights, drive_static_params - - -def optimize_evoked(net, tstop, n_trials, target_dpl, initial_dpl, maxiter=50, - timing_range_multiplier=3.0, sigma_range_multiplier=50.0, - synweight_range_multiplier=500.0, decay_multiplier=1.6, - scale_factor=1., smooth_window_len=None, dt=0.025, - which_drives='all', return_rmse=False): - """Optimize drives to generate evoked response. - - Parameters - ---------- - net : Network instance - An instance of the Network object with attached evoked drives. Timing - and synaptic weight parameters will be optimized for each attached - evoked drive. Note that no new drives will be created or old drives - destroyed. - tstop : float - The simulation stop time (ms). - n_trials : int - The number of trials to simulate. - target_dpl : instance of Dipole - The target experimental dipole. - initial_dpl : instance of Dipole - The initial dipole to start the optimization. - maxiter : int - The maximum number of simulations to run for optimizing - one "chunk". - timing_range_multiplier : float - The scale of timing values to sweep over. - sigma_range_multiplier : float - The scale of sigma values to sweep over. - synweight_range_multiplier : float - The scale of input synaptic weights to sweep over. - decay_multiplier : float - The decay multiplier. - scale_factor : float - Scales the simulated dipoles by scale_factor to match - target_dpl. - smooth_window_len : int - The length of the hamming window (in samples) to smooth the - simulated dipole waveform in each optimization step. - dt : float - The integration time step (ms) of h.CVode during simulation. - which_drives: 'all' or list - Evoked drives to optimize. 
If 'all', will opimize all evoked drives. - If a subset list of evoked drives, will optimize only the evoked drives in the list. - return_rmse : bool - Returns list of unweighted RMSEs between the simulated and experimental dipole - waveforms for each optimization step - - Returns - ------- - net : Network instance - An instance of the Network object with the optimized configuration of - attached drives. - iter_avg_rmse : list of float - Unweighted RMSE between data in dpl and exp_dpl for each iteration. Returned only - if return_rmse is True - - Notes - ----- - This optimization protocol utilizes the Constrained Optimization - By Linear Approximation (COBYLA) method: - https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_cobyla.html # noqa - """ - - net = net.copy() - - evoked_drive_names = [key for key in net.external_drives.keys() - if net.external_drives[key]['type'] == 'evoked'] - - if len(evoked_drive_names) == 0: - raise ValueError('The current Network instance lacks any evoked ' - 'drives. Consider adding drives using ' - 'net.add_evoked_drive') - elif which_drives == 'all': - drive_names = evoked_drive_names - else: - drive_names = [mydrive for mydrive in np.unique(which_drives) - if mydrive in evoked_drive_names] - if len(drive_names) == 0: - raise ValueError('The drives selected to be optimized are not evoked ' - 'drives. Optimization works only evoked drives.') - - drive_dynamics, drive_syn_weights, drive_static_params = \ - _get_drive_params(net, drive_names) - - # Create a sorted dictionary with the inputs and parameters - # belonging to each. - # Then, calculate the appropriate weight function to be used - # in RMSE using the CDF defined by the input timing's mean and - # std. deviation parameters. - # Lastly, use the weights to define non-overlapping "chunks" of - # the simulation timeframe to optimize. Chunks are consolidated if - # more than one input should - # be optimized at a time. - evinput_params = _split_by_evinput(drive_names, - drive_dynamics, - drive_syn_weights, - tstop, - sigma_range_multiplier, - timing_range_multiplier, - synweight_range_multiplier) - evinput_params = _generate_weights(evinput_params, tstop, dt, - decay_multiplier) - param_chunks = _consolidate_chunks(evinput_params) - - best_rmse = _rmse(initial_dpl, target_dpl, tstop=tstop) - opt_dpls = dict(best_dpl=initial_dpl, target_dpl=target_dpl) - print("Initial RMSE: %.2e" % best_rmse) - - opt_params = dict() - - if return_rmse is True: - opt_params['iter_avg_rmse'] = list() - - for step in range(len(param_chunks)): - opt_params['cur_step'] = step - total_steps = len(param_chunks) - - # param_chunks is the optimization information for all steps. - # opt_params is a pointer to the params for each step - opt_params.update(param_chunks[step]) - - if maxiter == 0: - print("Skipping optimization step %d (0 simulations)" % (step + 1)) - continue - - if (opt_params['cur_step'] > 0 and - opt_params['cur_step'] == total_steps - 1): - # For the last step (all inputs), recalculate ranges and update - # param_chunks. If previous optimization steps increased - # std. dev. this could result in fewer optimization steps as - # inputs may be deemed too close together and be grouped in a - # single optimization step. - # - # The purpose of the last step (with regular RMSE) is to clean up - # overfitting introduced by local weighted RMSE optimization. 
- - evinput_params = _split_by_evinput(drive_names, - drive_dynamics, - drive_syn_weights, - tstop, - sigma_range_multiplier, - timing_range_multiplier, - synweight_range_multiplier) - evinput_params = _generate_weights(evinput_params, tstop, dt, - decay_multiplier) - param_chunks = _consolidate_chunks(evinput_params) - - # reload opt_params for the last step in case the number of - # steps was changed by updateoptparams() - opt_params.update(param_chunks[total_steps - 1]) - - print("Starting optimization step %d/%d" % (step + 1, total_steps)) - - opt_params['optiter'] = 0 - opt_params['stepminopterr'] = _rmse(opt_dpls['best_dpl'], - opt_dpls['target_dpl'], - tstart=opt_params['opt_start'], - tstop=opt_params['opt_end'], - weights=opt_params['weights']) - - net_opt = net.copy() - # drive_params_updated must be a list for compatability with the args - # in the optimization engine, scipy.optimize.fmin_cobyla - _myoptrun = partial(_optrun, - drive_params_static=drive_static_params, - net=net_opt, - tstop=tstop, - dt=dt, - n_trials=n_trials, - opt_params=opt_params, - opt_dpls=opt_dpls, - scale_factor=scale_factor, - smooth_window_len=smooth_window_len, - return_rmse=return_rmse) - - print('Optimizing from [%3.3f-%3.3f] ms' % (opt_params['opt_start'], - opt_params['opt_end'])) - opt_results = _run_optimization(maxiter=maxiter, - param_ranges=opt_params['ranges'], - optrun=_myoptrun) - # tiny negative weights are possible. Clip them to 0. - opt_results[opt_results < 0] = 0 - - # update opt_params for the next round if total rmse decreased - avg_rmse = _rmse(opt_dpls['best_dpl'], - opt_dpls['target_dpl'], - tstop=tstop, - weights=None) - if avg_rmse <= best_rmse: - best_rmse = avg_rmse - for var_name, value in zip(opt_params['ranges'], opt_results): - opt_params['ranges'][var_name]['initial'] = value - - net = net_opt - - print("Final RMSE: %.2e" % best_rmse) - - if return_rmse is True: - return net, opt_params['iter_avg_rmse'] - return net From 7bef1564e1ec3abdbe1378c3a17dafadbc8664c3 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 10 Aug 2023 13:10:04 -0400 Subject: [PATCH 26/63] add old optimization.py file to new optimization dir --- hnn_core/optimization/optimization.py | 638 ++++++++++++++++++++++++++ 1 file changed, 638 insertions(+) create mode 100644 hnn_core/optimization/optimization.py diff --git a/hnn_core/optimization/optimization.py b/hnn_core/optimization/optimization.py new file mode 100644 index 000000000..436af95c1 --- /dev/null +++ b/hnn_core/optimization/optimization.py @@ -0,0 +1,638 @@ +"""Parameter optimization functions.""" + +# Authors: Blake Caldwell +# Mainak Jas + +from math import ceil, floor +from collections import OrderedDict +import fnmatch +from functools import partial + +import numpy as np +import scipy.stats as stats +from scipy.optimize import fmin_cobyla + +from .dipole import simulate_dipole, _rmse, average_dipoles +from .network import pick_connection + + +def _get_range(val, multiplier): + """Get range of values to sweep over.""" + range_min = max(0, val - val * multiplier / 100.) + range_max = val + val * multiplier / 100. 
+    ranges = {'initial': val, 'minval': range_min, 'maxval': range_max}
+    return ranges
+
+
+def _split_by_evinput(drive_names, drive_dynamics, drive_syn_weights, tstop,
+                      sigma_range_multiplier, timing_range_multiplier,
+                      synweight_range_multiplier):
+    """ Sorts parameter ranges by evoked inputs into a dictionary
+
+    Parameters
+    ----------
+    drive_names : list of str, shape (n_drives, )
+        Names corresponding to n Network drives.
+    drive_dynamics : list of dict, shape (n_drives, )
+        Dynamics parameters for each drive specified in drive_names.
+    drive_syn_weights : list of dict, shape (n_drives, )
+        Synaptic weight parameters for each drive specified in drive_names.
+    tstop : float
+        The simulation stop time (ms).
+    sigma_range_multiplier : float
+        The scale of sigma values to sweep over.
+    timing_range_multiplier : float
+        The scale of timing values to sweep over.
+    synweight_range_multiplier : float
+        The scale of input synaptic weights to sweep over.
+
+    Returns
+    -------
+    sorted_evinput_params: dict
+        Dictionary with parameters grouped by evoked inputs.
+        Keys for each evoked input are
+        'mean', 'sigma', 'ranges', 'start', and 'end'.
+        sorted_evinput_params['evprox1']['ranges'] is a dict
+        with keys as the parameters to be optimized and values
+        indicating the ranges over which they should be
+        optimized. E.g.,
+        sorted_evinput['evprox1']['ranges'] =
+        {
+            'gbar_evprox_1_L2Pyr_ampa':
+                {
+                    'initial': 0.05125,
+                    'minval': 0,
+                    'maxval': 0.0915
+                }
+        }
+        Elements are sorted by their start time.
+    """
+
+    evinput_params = {}
+    for drive_idx, drive_name in enumerate(drive_names):
+        timing_mean = drive_dynamics[drive_idx]['mu']
+        timing_sigma = drive_dynamics[drive_idx]['sigma']
+
+        if timing_sigma == 0.0:
+            # sigma of 0 will not produce a CDF
+            timing_sigma = 0.01
+
+        evinput_params[drive_name] = {'mean': timing_mean,
+                                      'sigma': timing_sigma,
+                                      'ranges': {}}
+
+        evinput_params[drive_name]['ranges'][f'{drive_name}_sigma'] = \
+            _get_range(timing_sigma, sigma_range_multiplier)
+
+        # calculate range for time
+        timing_bound = timing_sigma * timing_range_multiplier
+        range_min = max(0, timing_mean - timing_bound)
+        range_max = min(tstop, timing_mean + timing_bound)
+
+        evinput_params[drive_name]['start'] = range_min
+        evinput_params[drive_name]['end'] = range_max
+        evinput_params[drive_name]['ranges'][f'{drive_name}_mu'] = \
+            {'initial': timing_mean, 'minval': range_min, 'maxval': range_max}
+
+        # calculate ranges for syn. weights
+        for syn_weight_key in drive_syn_weights[drive_idx]:
+            new_key = f'{drive_name}_gbar_{syn_weight_key}'
+            weight = drive_syn_weights[drive_idx][syn_weight_key]
+            ranges = _get_range(weight, synweight_range_multiplier)
+            if weight == 0.0:
+                ranges['minval'] = weight
+                ranges['maxval'] = 1.0
+            evinput_params[drive_name]['ranges'][new_key] = ranges
+
+    sorted_evinput_params = OrderedDict(sorted(evinput_params.items(),
+                                               key=lambda x: x[1]['start']))
+    return sorted_evinput_params
+
+
+def _generate_weights(evinput_params, tstop, dt, decay_multiplier):
+    """Calculation of weight function for wRMSE calculation
+
+    Returns
+    -------
+    evinput_params : dict
+        Adds the keys 'weights', 'opt_start', 'opt_end'
+        to evinput_params[input_name] and removes 'mean'
+        and 'sigma' which were needed to compute 'weights'.
+ """ + num_step = ceil(tstop / dt) + 1 + times = np.linspace(0, tstop, num_step) + + for evinput_this in evinput_params.values(): + # calculate cdf using start time (minival of optimization range) + evinput_this['cdf'] = stats.norm.cdf( + times, evinput_this['start'], evinput_this['sigma']) + + for input_name, evinput_this in evinput_params.items(): + evinput_this['weights'] = evinput_this['cdf'].copy() + + for other_input, evinput_other in evinput_params.items(): + # check ordering to only use inputs after us + # and don't subtract our own cdf(s) + if (evinput_other['mean'] < evinput_this['mean'] or + input_name == other_input): + continue + + decay_factor = decay_multiplier * \ + (evinput_other['mean'] - evinput_this['mean']) / tstop + evinput_this['weights'] -= evinput_other['cdf'] * decay_factor + + # weights should not drop below 0 + np.clip(evinput_this['weights'], a_min=0, a_max=None, + out=evinput_this['weights']) + + # start and stop optimization where the weights are insignificant + indices = np.where(evinput_this['weights'] > 0.01) + evinput_this['opt_start'] = min(evinput_this['start'], + times[indices][0]) + evinput_this['opt_end'] = max(evinput_this['end'], + times[indices][-1]) + + # convert to multiples of dt + evinput_this['opt_start'] = floor(evinput_this['opt_start'] / dt) * dt + evinput_params[input_name]['opt_end'] = ceil( + evinput_this['opt_end'] / dt) * dt + + for evinput_this in evinput_params.values(): + del evinput_this['mean'], evinput_this['sigma'], evinput_this['cdf'] + + return evinput_params + + +def _create_last_chunk(input_chunks): + """ This creates a chunk that combines parameters for + all chunks in input_chunks (final step) + + Parameters + ---------- + input_chunks: List + List of ordered chunks for optimization + + Returns + ------- + chunk: dict + Dictionary of with parameters for combined + chunk (final step) + """ + chunk = {'inputs': [], 'ranges': {}, 'opt_start': 0.0, + 'opt_end': 0.0} + + for evinput in input_chunks: + chunk['inputs'].extend(evinput['inputs']) + chunk['ranges'].update(evinput['ranges']) + if evinput['opt_end'] > chunk['opt_end']: + chunk['opt_end'] = evinput['opt_end'] + + # wRMSE with weights of 1's is the same as regular RMSE. + chunk['weights'] = np.ones_like(input_chunks[-1]['weights']) + + return chunk + + +def _consolidate_chunks(inputs): + """Consolidates inputs into optimization "chunks" defined by + opt_start and opt_end + + Parameters + ---------- + inputs: dict + Sorted dictionary of inputs with their parameters + and weight functions + + Returns + ------- + chunks: list + Combine the evinput_params whenever the end is overlapping + with the next. 
+ """ + chunks = list() + for input_name in inputs: + input_dict = inputs[input_name].copy() + input_dict['inputs'] = [input_name] + + if (len(chunks) > 0 and + input_dict['start'] <= chunks[-1]['end']): + # update previous chunk + chunks[-1]['inputs'].extend(input_dict['inputs']) + chunks[-1]['end'] = input_dict['end'] + chunks[-1]['ranges'].update(input_dict['ranges']) + chunks[-1]['opt_end'] = max(chunks[-1]['opt_end'], + input_dict['opt_end']) + # average the weights + chunks[-1]['weights'] = (chunks[-1]['weights'] + + input_dict['weights']) / 2 + else: + # new chunk + chunks.append(input_dict) + + # add one last chunk to the end + if len(chunks) > 1: + last_chunk = _create_last_chunk(chunks) + chunks.append(last_chunk) + + return chunks + + +def _optrun(drive_params_updated, drive_params_static, net, tstop, dt, + n_trials, opt_params, opt_dpls, scale_factor, smooth_window_len, + return_rmse): + """This is the function to run a simulation + + Parameters + ---------- + drive_params_updated : array-like, shape (n_params, ) + List or numpy array with the parameters chosen by + optimization engine. Order is consistent with + opt_params['ranges']. + drive_params_static : dict + Drive parameters that remain constant throughout optimization. Keys + correspond to the drive names that are being optimized in this chunk. + net : Network instance + Network instance with attached drives. This object will be modified + in-place. + tstop : float + The simulation stop time (ms). + dt : float + The integration time step (ms) of h.CVode during simulation. + n_trials : int + The number of trials to simulate. + opt_params : dict + The optimization parameters. + opt_dpls : dict + Dictionary with keys 'target_dpl' and 'best' for + the experimental dipole and best dipole. + scale_factor : float + Scales the simulated dipoles by scale_factor to match + exp_dpl. + smooth_window_len : int + The length of the hamming window (in samples) to smooth the + simulated dipole waveform in each optimization step. + return_rmse : bool + Returns list of unweighted RMSEs between data in dpl and exp_dpl + for each optimization step + + Returns + ------- + avg_rmse: float + Weighted RMSE between data in dpl and exp_dpl + avg_rmse_unweighted : float + Unweighted RMSE between data in dpl and exp_dpl + """ + print("Optimization step %d, iteration %d" % (opt_params['cur_step'] + 1, + opt_params['optiter'] + 1)) + + # match parameter values contained in list to their respective key names + params_dict = dict() + for param_name, test_value in zip(opt_params['ranges'].keys(), + drive_params_updated): + # tiny negative weights are possible. Clip them to 0. 
+ if test_value < 0: + test_value = 0 + params_dict[param_name] = test_value + + # modify drives according to the drive names in the current chunk + for drive_name in opt_params['inputs']: + + # clear drive and its connectivity + del net.external_drives[drive_name] + conn_idxs = pick_connection(net, src_gids=drive_name) + net.connectivity = [conn for conn_idx, conn + in enumerate(net.connectivity) + if conn_idx not in conn_idxs] + + # extract syn weights: final weights dicts should have keys that + # correspond to cell types + keys_ampa = fnmatch.filter(params_dict.keys(), + f'{drive_name}_gbar_ampa_*') + keys_nmda = fnmatch.filter(params_dict.keys(), + f'{drive_name}_gbar_nmda_*') + weights_ampa = {key.lstrip(f'{drive_name}_gbar_ampa_'): + params_dict[key] for key in keys_ampa} + weights_nmda = {key.lstrip(f'{drive_name}_gbar_nmda_'): + params_dict[key] for key in keys_nmda} + + net.add_evoked_drive( + name=drive_name, + mu=params_dict[drive_name + '_mu'], + sigma=params_dict[drive_name + '_sigma'], + numspikes=drive_params_static[drive_name]['numspikes'], + location=drive_params_static[drive_name]['location'], + n_drive_cells=drive_params_static[drive_name]['n_drive_cells'], + cell_specific=drive_params_static[drive_name]['cell_specific'], + weights_ampa=weights_ampa, + weights_nmda=weights_nmda, + space_constant=drive_params_static[drive_name]['space_constant'], + synaptic_delays=drive_params_static[drive_name]['synaptic_delays'], + probability=drive_params_static[drive_name]['probability'], + event_seed=drive_params_static[drive_name]['event_seed'], + conn_seed=drive_params_static[drive_name]['conn_seed'] + ) + + # run the simulation + dpls = simulate_dipole(net, tstop=tstop, dt=dt, n_trials=n_trials) + # order of operations: scale, smooth, then average + dpls = [dpl.scale(scale_factor) for dpl in dpls] + if smooth_window_len is not None: + dpls = [dpl.smooth(smooth_window_len) for dpl in dpls] + avg_dpl = average_dipoles(dpls) + + avg_rmse = _rmse(avg_dpl, opt_dpls['target_dpl'], + tstart=opt_params['opt_start'], + tstop=opt_params['opt_end'], + weights=opt_params['weights']) + avg_rmse_unweighted = _rmse(avg_dpl, opt_dpls['target_dpl'], + tstart=opt_params['opt_start'], + tstop=tstop, weights=None) + + if return_rmse: + opt_params['iter_avg_rmse'].append(avg_rmse_unweighted) + opt_params['stepminopterr'] = avg_rmse + opt_dpls['best_dpl'] = avg_dpl + + print("weighted RMSE: %.2e over range [%3.3f-%3.3f] ms" % + (avg_rmse, opt_params['opt_start'], opt_params['opt_end'])) + + opt_params['optiter'] += 1 + + return avg_rmse, avg_rmse_unweighted # nlopt expects error + + +def _run_optimization(maxiter, param_ranges, optrun): + + cons = list() + x0 = list() + for idx, param_name in enumerate(param_ranges): + x0.append(param_ranges[param_name]['initial']) + cons.append( + lambda x, idx=idx: param_ranges[param_name]['maxval'] - x[idx]) + cons.append( + lambda x, idx=idx: x[idx] - param_ranges[param_name]['minval']) + result = fmin_cobyla(optrun, cons=cons, rhobeg=0.1, rhoend=1e-4, + x0=x0, maxfun=maxiter, catol=0.0) + return result + + +def _get_drive_params(net, drive_names): + """Get evoked drive parameters from a Network instance.""" + + drive_dynamics = list() + drive_syn_weights = list() + drive_static_params = dict() + for drive_name in drive_names: + drive = net.external_drives[drive_name] + drive_dynamics.append(drive['dynamics'].copy()) + conn_idxs = pick_connection(net, src_gids=drive_name) + weights = dict() + delays = dict() + probabilities = dict() + for conn_idx in conn_idxs: + 
target_type = net.connectivity[conn_idx]['target_type']
+            target_receptor = net.connectivity[conn_idx]['receptor']
+            weight = net.connectivity[conn_idx]['nc_dict']['A_weight']
+            # note that for each drive, the weights dict should be unnested
+            # across target cell types and receptors for ease-of-use when
+            # these values get restructured into a list downstream
+
+            # legacy_mode hack: don't include invalid connections that have
+            # been added in Network when legacy_mode=True
+            if not (drive['location'] == 'distal' and
+                    target_type == 'L5_basket'):
+                if target_receptor == "ampa":
+                    weights.update({f'ampa_{target_type}': weight})
+                if target_receptor == "nmda":
+                    weights.update({f'nmda_{target_type}': weight})
+            # delay should be constant across AMPA and NMDA receptor types
+            delay = net.connectivity[conn_idx]['nc_dict']['A_delay']
+            delays.update({target_type: delay})
+            # space constant should be constant across drive connections
+            space_const = net.connectivity[conn_idx]['nc_dict']['lamtha']
+            # probability should be constant across AMPA and NMDA receptor
+            # types
+            probability = net.connectivity[conn_idx]['probability']
+            probabilities.update({target_type: probability})
+
+        drive_syn_weights.append(weights)
+
+        static_params = dict()
+        static_params['numspikes'] = drive['dynamics']['numspikes']
+        static_params['location'] = drive['location']
+        if drive['cell_specific']:
+            static_params['n_drive_cells'] = 'n_cells'
+        else:
+            static_params['n_drive_cells'] = drive['n_drive_cells']
+        static_params['cell_specific'] = drive['cell_specific']
+        static_params['space_constant'] = space_const
+        static_params['synaptic_delays'] = delays
+        static_params['probability'] = probabilities
+        static_params['event_seed'] = drive['event_seed']
+        static_params['conn_seed'] = drive['conn_seed']
+        drive_static_params.update({drive_name: static_params})
+
+    return drive_dynamics, drive_syn_weights, drive_static_params
+
+
+def optimize_evoked(net, tstop, n_trials, target_dpl, initial_dpl, maxiter=50,
+                    timing_range_multiplier=3.0, sigma_range_multiplier=50.0,
+                    synweight_range_multiplier=500.0, decay_multiplier=1.6,
+                    scale_factor=1., smooth_window_len=None, dt=0.025,
+                    which_drives='all', return_rmse=False):
+    """Optimize drives to generate evoked response.
+
+    Parameters
+    ----------
+    net : Network instance
+        An instance of the Network object with attached evoked drives. Timing
+        and synaptic weight parameters will be optimized for each attached
+        evoked drive. Note that no new drives will be created or old drives
+        destroyed.
+    tstop : float
+        The simulation stop time (ms).
+    n_trials : int
+        The number of trials to simulate.
+    target_dpl : instance of Dipole
+        The target experimental dipole.
+    initial_dpl : instance of Dipole
+        The initial dipole to start the optimization.
+    maxiter : int
+        The maximum number of simulations to run for optimizing
+        one "chunk".
+    timing_range_multiplier : float
+        The scale of timing values to sweep over.
+    sigma_range_multiplier : float
+        The scale of sigma values to sweep over.
+    synweight_range_multiplier : float
+        The scale of input synaptic weights to sweep over.
+    decay_multiplier : float
+        The decay multiplier.
+    scale_factor : float
+        Scales the simulated dipoles by scale_factor to match
+        target_dpl.
+    smooth_window_len : int
+        The length of the hamming window (in samples) to smooth the
+        simulated dipole waveform in each optimization step.
+    dt : float
+        The integration time step (ms) of h.CVode during simulation.
+    which_drives: 'all' or list
+        Evoked drives to optimize. If 'all', will optimize all evoked drives.
+        If a subset list of evoked drives, will optimize only the evoked drives in the list.
+    return_rmse : bool
+        Returns list of unweighted RMSEs between the simulated and experimental dipole
+        waveforms for each optimization step
+
+    Returns
+    -------
+    net : Network instance
+        An instance of the Network object with the optimized configuration of
+        attached drives.
+    iter_avg_rmse : list of float
+        Unweighted RMSE between data in dpl and exp_dpl for each iteration. Returned only
+        if return_rmse is True
+
+    Notes
+    -----
+    This optimization protocol utilizes the Constrained Optimization
+    By Linear Approximation (COBYLA) method:
+    https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_cobyla.html  # noqa
+    """
+
+    net = net.copy()
+
+    evoked_drive_names = [key for key in net.external_drives.keys()
+                          if net.external_drives[key]['type'] == 'evoked']
+
+    if len(evoked_drive_names) == 0:
+        raise ValueError('The current Network instance lacks any evoked '
+                         'drives. Consider adding drives using '
+                         'net.add_evoked_drive')
+    elif which_drives == 'all':
+        drive_names = evoked_drive_names
+    else:
+        drive_names = [mydrive for mydrive in np.unique(which_drives)
+                       if mydrive in evoked_drive_names]
+        if len(drive_names) == 0:
+            raise ValueError('The drives selected to be optimized are not evoked '
+                             'drives. Optimization works only on evoked drives.')
+
+    drive_dynamics, drive_syn_weights, drive_static_params = \
+        _get_drive_params(net, drive_names)
+
+    # Create a sorted dictionary with the inputs and parameters
+    # belonging to each.
+    # Then, calculate the appropriate weight function to be used
+    # in RMSE using the CDF defined by the input timing's mean and
+    # std. deviation parameters.
+    # Lastly, use the weights to define non-overlapping "chunks" of
+    # the simulation timeframe to optimize. Chunks are consolidated if
+    # more than one input should
+    # be optimized at a time.
+    evinput_params = _split_by_evinput(drive_names,
+                                       drive_dynamics,
+                                       drive_syn_weights,
+                                       tstop,
+                                       sigma_range_multiplier,
+                                       timing_range_multiplier,
+                                       synweight_range_multiplier)
+    evinput_params = _generate_weights(evinput_params, tstop, dt,
+                                       decay_multiplier)
+    param_chunks = _consolidate_chunks(evinput_params)
+
+    best_rmse = _rmse(initial_dpl, target_dpl, tstop=tstop)
+    opt_dpls = dict(best_dpl=initial_dpl, target_dpl=target_dpl)
+    print("Initial RMSE: %.2e" % best_rmse)
+
+    opt_params = dict()
+
+    if return_rmse is True:
+        opt_params['iter_avg_rmse'] = list()
+
+    for step in range(len(param_chunks)):
+        opt_params['cur_step'] = step
+        total_steps = len(param_chunks)
+
+        # param_chunks is the optimization information for all steps.
+        # opt_params is a pointer to the params for each step
+        opt_params.update(param_chunks[step])
+
+        if maxiter == 0:
+            print("Skipping optimization step %d (0 simulations)" % (step + 1))
+            continue
+
+        if (opt_params['cur_step'] > 0 and
+                opt_params['cur_step'] == total_steps - 1):
+            # For the last step (all inputs), recalculate ranges and update
+            # param_chunks. If previous optimization steps increased
+            # std. dev. this could result in fewer optimization steps as
+            # inputs may be deemed too close together and be grouped in a
+            # single optimization step.
+            #
+            # The purpose of the last step (with regular RMSE) is to clean up
+            # overfitting introduced by local weighted RMSE optimization.
+
+ evinput_params = _split_by_evinput(drive_names,
+ drive_dynamics,
+ drive_syn_weights,
+ tstop,
+ sigma_range_multiplier,
+ timing_range_multiplier,
+ synweight_range_multiplier)
+ evinput_params = _generate_weights(evinput_params, tstop, dt,
+ decay_multiplier)
+ param_chunks = _consolidate_chunks(evinput_params)
+
+ # reload opt_params for the last step in case the number of
+ # steps was changed by updateoptparams()
+ opt_params.update(param_chunks[total_steps - 1])
+
+ print("Starting optimization step %d/%d" % (step + 1, total_steps))
+
+ opt_params['optiter'] = 0
+ opt_params['stepminopterr'] = _rmse(opt_dpls['best_dpl'],
+ opt_dpls['target_dpl'],
+ tstart=opt_params['opt_start'],
+ tstop=opt_params['opt_end'],
+ weights=opt_params['weights'])
+
+ net_opt = net.copy()
+ # drive_params_updated must be a list for compatibility with the args
+ # in the optimization engine, scipy.optimize.fmin_cobyla
+ _myoptrun = partial(_optrun,
+ drive_params_static=drive_static_params,
+ net=net_opt,
+ tstop=tstop,
+ dt=dt,
+ n_trials=n_trials,
+ opt_params=opt_params,
+ opt_dpls=opt_dpls,
+ scale_factor=scale_factor,
+ smooth_window_len=smooth_window_len,
+ return_rmse=return_rmse)
+
+ print('Optimizing from [%3.3f-%3.3f] ms' % (opt_params['opt_start'],
+ opt_params['opt_end']))
+ opt_results = _run_optimization(maxiter=maxiter,
+ param_ranges=opt_params['ranges'],
+ optrun=_myoptrun)
+ # tiny negative weights are possible. Clip them to 0.
+ opt_results[opt_results < 0] = 0
+
+ # update opt_params for the next round if total rmse decreased
+ avg_rmse = _rmse(opt_dpls['best_dpl'],
+ opt_dpls['target_dpl'],
+ tstop=tstop,
+ weights=None)
+ if avg_rmse <= best_rmse:
+ best_rmse = avg_rmse
+ for var_name, value in zip(opt_params['ranges'], opt_results):
+ opt_params['ranges'][var_name]['initial'] = value
+
+ net = net_opt
+
+ print("Final RMSE: %.2e" % best_rmse)
+
+ if return_rmse is True:
+ return net, opt_params['iter_avg_rmse']
+ return net

From c376fb8f8917b0d6e2143d9400db5ff89f660c4c Mon Sep 17 00:00:00 2001
From: Carolina Fernandez
Date: Thu, 10 Aug 2023 16:09:58 -0400
Subject: [PATCH 27/63] reduce example to one drive, justify syn weight
 ranges, parametrize test funcs

---
 examples/howto/optimize_evoked.py | 42 ++++++++------------------
 hnn_core/optimization/optimizer.py | 2 +-
 hnn_core/tests/test_optimize_evoked.py | 15 ++++-----
 3 files changed, 21 insertions(+), 38 deletions(-)

diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py
index e2cc0b87b..6da9f48c2 100644
--- a/examples/howto/optimize_evoked.py
+++ b/examples/howto/optimize_evoked.py
@@ -74,20 +74,16 @@ def set_params(net, params):
 synaptic_delays=synaptic_delays_p)
 
 # Distal
- weights_ampa_d1 = {'L2_basket':
- params['evdist1_ampa_L2_basket'],
- 'L2_pyramidal':
- params['evdist1_ampa_L2_pyramidal'],
- 'L5_pyramidal':
- params['evdist1_ampa_L5_pyramidal']}
+ weights_ampa_d1 = {'L2_basket': 0.006562, 'L2_pyramidal': 7e-06,
+ 'L5_pyramidal': 0.1423}
 weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317,
 'L5_pyramidal': 0.080074}
 synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
 'L5_pyramidal': 0.1}
 net.add_evoked_drive('evdist1',
- mu=params['evdist1_mu'],
- sigma=params['evdist1_sigma'],
+ mu=63.53,
+ sigma=3.85,
 numspikes=1,
 location='distal',
 weights_ampa=weights_ampa_d1,
 weights_nmda=weights_nmda_d1,
 synaptic_delays=synaptic_delays_d1)
 
 # Proximal 2
- weights_ampa_p2 = {'L2_basket':
- params['evprox2_ampa_L2_basket'],
- 'L2_pyramidal':
- 
params['evprox2_ampa_L2_pyramidal'], - 'L5_basket': - params['evprox2_ampa_L5_basket'], - 'L5_pyramidal': - params['evprox2_ampa_L5_pyramidal']} + weights_ampa_p2 = {'L2_basket': 3e-06, 'L2_pyramidal': 1.43884, + 'L5_basket': 0.008958, 'L5_pyramidal': 0.684013} net.add_evoked_drive('evprox2', - mu=params['evprox2_mu'], - sigma=params['evprox2_sigma'], + mu=137.12, + sigma=8.33, numspikes=1, location='proximal', weights_ampa=weights_ampa_p2, @@ -118,6 +108,9 @@ def set_params(net, params): # The constraints must be a dictionary of tuples where the first value in each # tuple is the lower bound and the second value is the upper bound for the # corresponding parameter. +# +# The following synaptic weight parameter ranges (units of micro-siemens) +# were chosen given that they are physiologically realistic. constraints = dict() @@ -126,18 +119,7 @@ def set_params(net, params): 'evprox1_ampa_L5_basket': (0.01, 1.), 'evprox1_ampa_L5_pyramidal': (0.01, 1.), 'evprox1_mu': (5., 50.), - 'evprox1_sigma': (2., 25.), - 'evdist1_ampa_L2_basket': (0.01, 1.), - 'evdist1_ampa_L2_pyramidal': (0.01, 1.), - 'evdist1_ampa_L5_pyramidal': (0.01, 1.), - 'evdist1_mu': (50., 80.), - 'evdist1_sigma': (2., 25.), - 'evprox2_ampa_L2_basket': (0.01, 1.), - 'evprox2_ampa_L2_pyramidal': (0.01, 1.), - 'evprox2_ampa_L5_basket': (0.01, 1.), - 'evprox2_ampa_L5_pyramidal': (0.01, 1.), - 'evprox2_mu': (125., 150.), - 'evprox2_sigma': (10., 60.)}) + 'evprox1_sigma': (2., 25.)}) ############################################################################### diff --git a/hnn_core/optimization/optimizer.py b/hnn_core/optimization/optimizer.py index 85d730d9b..9512d1125 100644 --- a/hnn_core/optimization/optimizer.py +++ b/hnn_core/optimization/optimizer.py @@ -29,7 +29,7 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, solver : string The optimizer, 'bayesian' or 'cobyla'. obj_fun : string - The type of drive to be optimized, 'evoked'. + The objective function to be minimized. tstop : float The simulated dipole's duration. 
scale_factor : float, optional diff --git a/hnn_core/tests/test_optimize_evoked.py b/hnn_core/tests/test_optimize_evoked.py index 2a151d3f5..e3e1f70fb 100644 --- a/hnn_core/tests/test_optimize_evoked.py +++ b/hnn_core/tests/test_optimize_evoked.py @@ -5,6 +5,8 @@ from hnn_core import jones_2009_model, simulate_dipole, Optimizer +import pytest + def _optimize_evoked(solver): """Test running the full routine in a reduced network.""" @@ -45,7 +47,7 @@ def set_params(net_offset, params): synaptic_delays = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, 'L5_basket': 1., 'L5_pyramidal': 1.} net_offset.add_evoked_drive('evprox', - mu=params['mu_offset'], + mu=params['mu'], sigma=1, numspikes=1, location='proximal', @@ -55,7 +57,7 @@ def set_params(net_offset, params): # define constraints mu_range = (1, 6) constraints = dict() - constraints.update({'mu_offset': mu_range}) + constraints.update({'mu': mu_range}) optim = Optimizer(net_offset, constraints=constraints, set_params=set_params, solver=solver, @@ -75,9 +77,8 @@ def set_params(net_offset, params): assert all(vals > 0 for vals in obj), "rmse values should be positive" -def test_bayesian_evoked(): - _optimize_evoked('bayesian') - +@pytest.mark.parametrize("solver", ['bayesian', 'cobyla']) +def test_optimize_evoked(solver): + """Test optimization routines for evoked drives.""" -def test_cobyla_evoked(): - _optimize_evoked('cobyla') + _optimize_evoked(solver) From 1f068cb27d390fff65d6cbdc9c2c53dcb8dbea11 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Mon, 14 Aug 2023 12:55:38 -0400 Subject: [PATCH 28/63] Address comments --- examples/howto/optimize_evoked.py | 11 +-- hnn_core/__init__.py | 1 - hnn_core/optimization/__init__.py | 1 + .../{optimizer.py => general_optimization.py} | 83 +++++++++---------- .../{optimization.py => optimize_evoked.py} | 0 hnn_core/tests/test_optimize_evoked.py | 29 ++++--- 6 files changed, 62 insertions(+), 63 deletions(-) rename hnn_core/optimization/{optimizer.py => general_optimization.py} (91%) rename hnn_core/optimization/{optimization.py => optimize_evoked.py} (100%) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index 6da9f48c2..b159cb350 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -113,28 +113,25 @@ def set_params(net, params): # were chosen given that they are physiologically realistic. -constraints = dict() -constraints.update({'evprox1_ampa_L2_basket': (0.01, 1.), +constraints = dict({'evprox1_ampa_L2_basket': (0.01, 1.), 'evprox1_ampa_L2_pyramidal': (0.01, 1.), 'evprox1_ampa_L5_basket': (0.01, 1.), 'evprox1_ampa_L5_pyramidal': (0.01, 1.), 'evprox1_mu': (5., 50.), 'evprox1_sigma': (2., 25.)}) - ############################################################################### # Now we define and fit the optimizer. 
-from hnn_core import Optimizer +from hnn_core.optimization import Optimizer tstop = exp_dpl.times[-1] scale_factor = 3000 smooth_window_len = 30 net = jones_2009_model() -optim = Optimizer(net, constraints=constraints, set_params=set_params, - solver='bayesian', obj_fun='evoked', tstop=tstop, - scale_factor=scale_factor, +optim = Optimizer(net, tstop=tstop, constraints=constraints, + set_params=set_params, scale_factor=scale_factor, smooth_window_len=smooth_window_len) with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): optim.fit(exp_dpl.data['agg']) diff --git a/hnn_core/__init__.py b/hnn_core/__init__.py index f26d3be6f..c17c68a7f 100644 --- a/hnn_core/__init__.py +++ b/hnn_core/__init__.py @@ -6,6 +6,5 @@ from .cell_response import CellResponse, read_spikes from .cells_default import pyramidal, basket from .parallel_backends import MPIBackend, JoblibBackend -from .optimization.optimizer import Optimizer __version__ = '0.4.dev0' diff --git a/hnn_core/optimization/__init__.py b/hnn_core/optimization/__init__.py index e69de29bb..244872f7f 100644 --- a/hnn_core/optimization/__init__.py +++ b/hnn_core/optimization/__init__.py @@ -0,0 +1 @@ +from .general_optimization import Optimizer \ No newline at end of file diff --git a/hnn_core/optimization/optimizer.py b/hnn_core/optimization/general_optimization.py similarity index 91% rename from hnn_core/optimization/optimizer.py rename to hnn_core/optimization/general_optimization.py index 9512d1125..d21cd65f5 100644 --- a/hnn_core/optimization/optimizer.py +++ b/hnn_core/optimization/general_optimization.py @@ -14,14 +14,17 @@ class Optimizer: - def __init__(self, net, constraints, set_params, solver, obj_fun, - tstop, scale_factor=1., smooth_window_len=None): + def __init__(self, net, tstop, constraints, set_params, solver='bayesian', + obj_fun='dipole_rmse', scale_factor=1., + smooth_window_len=None): """Parameter optimization. Parameters ---------- net : Network The network object. + tstop : float + The simulated dipole's duration. constraints : dict The user-defined constraints. set_params : func @@ -30,8 +33,6 @@ def __init__(self, net, constraints, set_params, solver, obj_fun, The optimizer, 'bayesian' or 'cobyla'. obj_fun : string The objective function to be minimized. - tstop : float - The simulated dipole's duration. scale_factor : float, optional The dipole scale factor. The default is 1. 
smooth_window_len : float, optional
@@ -48,18 +49,20 @@ def __init__(self, net, constraints, set_params, solver, obj_fun,
 self.max_iter = 200
 # Optimizer method
 if solver == 'bayesian':
+ self.solver = 'bayesian'
 self._assemble_constraints = _assemble_constraints_bayesian
 self._run_opt = _run_opt_bayesian
 elif solver == 'cobyla':
+ self.solver = 'cobyla'
 self._assemble_constraints = _assemble_constraints_cobyla
 self._run_opt = _run_opt_cobyla
 else:
 raise ValueError("solver must be 'bayesian' or 'cobyla'")
 # Response to be optimized
- if obj_fun == 'evoked':
+ if obj_fun == 'dipole_rmse':
 self.obj_fun = _rmse_evoked
 else:
- raise ValueError("obj_fun must be 'evoked'")
+ raise ValueError("obj_fun must be 'dipole_rmse'")
 self.scale_factor = scale_factor
 self.smooth_window_len = smooth_window_len
 self.tstop = tstop
@@ -68,18 +71,14 @@ def __init__(self, net, constraints, set_params, solver, obj_fun,
 self.opt_params_ = None
 
 def __repr__(self):
- class_name = self.__class__.__name__
 is_fit = False
 if self.net_ is not None:
 is_fit = True
- s = (
- f'{class_name}: '
- f'solver={self.solver}, '
- f'response type={self.obj_fun}, '
- f'has been fit={is_fit}'
- )
- return s
+ return "<{}\nsolver={}\nfit={}>".format(
+ self.__class__.__name__,
+ self.solver,
+ is_fit)
 
 def fit(self, target):
 """Runs optimization routine.
@@ -94,15 +93,15 @@ def fit(self, target):
 initial_params = _get_initial_params(self.constraints)
 
 opt_params, obj, net_ = self._run_opt(self.net,
+ self.tstop,
 constraints,
- initial_params,
 self._set_params,
 self.obj_fun,
- self.scale_factor,
- self.smooth_window_len,
- self.tstop,
+ initial_params,
 self.max_iter,
- target)
+ target,
+ self.scale_factor,
+ self.smooth_window_len)
 
 self.net_ = net_
 self.obj_ = obj
@@ -236,33 +235,33 @@ def _update_params(initial_params, predicted_params):
 return params
 
-def _run_opt_bayesian(net, constraints, initial_params, set_params, obj_fun,
- scale_factor, smooth_window_len, tstop, max_iter,
- target):
+def _run_opt_bayesian(net, tstop, constraints, set_params, obj_fun,
+ initial_params, max_iter, target, scale_factor=1.,
+ smooth_window_len=None):
 """Runs optimization routine with gp_minimize optimizer.
 
 Parameters
 ----------
 net : Network
 The network object.
+ tstop : float
+ The simulated dipole's duration.
 constraints : list of tuples
 Parameter constraints in solver-specific format.
- initial_params : dict
- Keys are parameter names, values are initial parameters..
 set_params : func
 User-defined function that sets parameters in network drives.
 obj_fun : func
 The objective function.
- scale_factor : float
- The dipole scale factor.
- smooth_window_len : float
- The smooth window length.
- tstop : float
- The simulated dipole's duration.
 max_iter : int
 Number of calls the optimizer makes.
 target : ndarray
 The recorded dipole.
+ scale_factor : float
+ The dipole scale factor.
+ smooth_window_len : float
+ The smooth window length.
 
 Returns
 -------
@@ -308,33 +307,33 @@ def _obj_func(predicted_params):
 return opt_params, obj, net_
 
-def _run_opt_cobyla(net, constraints, initial_params, set_params, obj_fun,
- scale_factor, smooth_window_len, tstop, max_iter,
- target):
+def _run_opt_cobyla(net, tstop, constraints, set_params, obj_fun,
+ initial_params, max_iter, target, scale_factor=1.,
+ smooth_window_len=None):
 """Runs optimization routine with fmin_cobyla optimizer.
 
 Parameters
 ----------
 net : Network
 The network object.
- constraints : dict
+ tstop : float
+ The simulated dipole's duration. 
+ constraints : list of tuples Parameter constraints in solver-specific format. - initial_params : dict - Keys are parameter names, values are initial parameters.. set_params : func User-defined function that sets parameters in network drives. obj_fun : func The objective function. + initial_params : dict + Keys are parameter names, values are initial parameters. + max_iter : int + Number of calls the optimizer makes. + target : ndarray + The recorded dipole. scale_factor : float The dipole scale factor. smooth_window_len : float The smooth window length. - tstop : float - The simulated dipole's duration. - max_iter : int - Number of calls the optimizer makes. - target : ndarray, optional - The recorded dipole. The default is None. Returns ------- diff --git a/hnn_core/optimization/optimization.py b/hnn_core/optimization/optimize_evoked.py similarity index 100% rename from hnn_core/optimization/optimization.py rename to hnn_core/optimization/optimize_evoked.py diff --git a/hnn_core/tests/test_optimize_evoked.py b/hnn_core/tests/test_optimize_evoked.py index e3e1f70fb..e3dd3beb4 100644 --- a/hnn_core/tests/test_optimize_evoked.py +++ b/hnn_core/tests/test_optimize_evoked.py @@ -3,13 +3,16 @@ # Ryan Thorpe # Mainak Jas -from hnn_core import jones_2009_model, simulate_dipole, Optimizer +from hnn_core import jones_2009_model, simulate_dipole + +from hnn_core.optimization import Optimizer import pytest -def _optimize_evoked(solver): - """Test running the full routine in a reduced network.""" +@pytest.mark.parametrize("solver", ['bayesian', 'cobyla']) +def test_optimize_evoked(solver): + """Test optimization routines for evoked drives in a reduced network.""" tstop = 10. n_trials = 1 @@ -59,11 +62,18 @@ def set_params(net_offset, params): constraints = dict() constraints.update({'mu': mu_range}) - optim = Optimizer(net_offset, constraints=constraints, + optim = Optimizer(net_offset, tstop=tstop, constraints=constraints, set_params=set_params, solver=solver, - obj_fun='evoked', tstop=tstop) + obj_fun='dipole_rmse') + + # test repr before fitting + assert 'fit=False' in repr(optim), "optimizer is already fit" + optim.fit(dpl_orig.data['agg']) + # test repr after fitting + assert 'fit=True' in repr(optim), "optimizer was not fit" + opt_param = optim.opt_params_[0] # the optimized parameter is in the range assert mu_range[0] <= opt_param <= mu_range[1], \ @@ -74,11 +84,4 @@ def set_params(net_offset, params): assert len(obj) <= 200, \ "Number of rmse values should be the same as max_iter" # the returned rmse values should be positive - assert all(vals > 0 for vals in obj), "rmse values should be positive" - - -@pytest.mark.parametrize("solver", ['bayesian', 'cobyla']) -def test_optimize_evoked(solver): - """Test optimization routines for evoked drives.""" - - _optimize_evoked(solver) + assert all(vals >= 0 for vals in obj), "rmse values should be positive" From aff7ce9dbc9fbbe2e29797f3faafb93c4a80c901 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Mon, 14 Aug 2023 13:12:27 -0400 Subject: [PATCH 29/63] Update whats_new.rst --- doc/whats_new.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index dea00f552..e3310c387 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -38,6 +38,11 @@ API - :func:`~hnn_core.Dipole.write` and :func:`~hnn_core.Dipole.read_dipoles` now support hdf5 format for read/write Dipole object, by `Rajat Partani`_ in :gh:`648` + +- Add ability to optimize parameters associated with evoked drives and plot + convergence. 
User can constrain parameter ranges and specify solver. + Document with an example and develop tests, by `Carolina Fernandez Pujol`_ + in :gh:`652` .. _0.3: From 465c666754a68445a6e73b51ce16fd47f1d4dffc Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Tue, 15 Aug 2023 12:40:55 -0400 Subject: [PATCH 30/63] update api.rst --- doc/api.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/api.rst b/doc/api.rst index ca432764f..ca15463b3 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -41,6 +41,7 @@ Optimization (:py:mod:`hnn_core.optimization`): :toctree: generated/ optimize_evoked + Optimizer Dipole (:py:mod:`hnn_core.dipole`): ----------------------------------- From c7443ed326aa42c56e1cfd60534f6ba1d1a10d5b Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Tue, 15 Aug 2023 13:07:48 -0400 Subject: [PATCH 31/63] Rename optimization test file --- .../{test_optimize_evoked.py => test_general_optimization.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename hnn_core/tests/{test_optimize_evoked.py => test_general_optimization.py} (100%) diff --git a/hnn_core/tests/test_optimize_evoked.py b/hnn_core/tests/test_general_optimization.py similarity index 100% rename from hnn_core/tests/test_optimize_evoked.py rename to hnn_core/tests/test_general_optimization.py From 3453dd8dbab75e2f6b2eca9622f74b0d5304ce65 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 17 Aug 2023 14:03:44 -0400 Subject: [PATCH 32/63] update whats_new.rst --- doc/whats_new.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index e3310c387..8469fb13c 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -40,8 +40,8 @@ API `Rajat Partani`_ in :gh:`648` - Add ability to optimize parameters associated with evoked drives and plot - convergence. User can constrain parameter ranges and specify solver. - Document with an example and develop tests, by `Carolina Fernandez Pujol`_ + convergence. User can constrain parameter ranges and specify solver, + by `Carolina Fernandez Pujol`_ in :gh:`652` .. _0.3: From aa3dbab692aa0b1cb338bece549546e133ad0d85 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 17 Aug 2023 14:08:15 -0400 Subject: [PATCH 33/63] update whats_new.rst --- doc/whats_new.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 8469fb13c..3c3082a46 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -41,8 +41,7 @@ API - Add ability to optimize parameters associated with evoked drives and plot convergence. User can constrain parameter ranges and specify solver, - by `Carolina Fernandez Pujol`_ - in :gh:`652` + by `Carolina Fernandez Pujol`_ in :gh:`652` .. _0.3: From 02be0652e459af5a63ceccdf4576b6305774c1c5 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 17 Aug 2023 14:52:47 -0400 Subject: [PATCH 34/63] update flake8, fix imports --- doc/api.rst | 1 - hnn_core/optimization/__init__.py | 2 +- hnn_core/optimization/general_optimization.py | 2 +- .../{test_optimization.py => test_optimize_evoked.py} | 8 +++++--- 4 files changed, 7 insertions(+), 6 deletions(-) rename hnn_core/tests/{test_optimization.py => test_optimize_evoked.py} (95%) diff --git a/doc/api.rst b/doc/api.rst index ca15463b3..4169e29d0 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -40,7 +40,6 @@ Optimization (:py:mod:`hnn_core.optimization`): .. 
autosummary:: :toctree: generated/ - optimize_evoked Optimizer Dipole (:py:mod:`hnn_core.dipole`): diff --git a/hnn_core/optimization/__init__.py b/hnn_core/optimization/__init__.py index 244872f7f..37ddae366 100644 --- a/hnn_core/optimization/__init__.py +++ b/hnn_core/optimization/__init__.py @@ -1 +1 @@ -from .general_optimization import Optimizer \ No newline at end of file +from .optimize_evoked import optimize_evoked from .general_optimization import Optimizer \ No newline at end of file diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index d21cd65f5..b454f8632 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -163,7 +163,7 @@ def _get_initial_params(constraints): initial_params = dict() for cons_key in constraints: initial_params.update({cons_key: ((constraints[cons_key][0] + - constraints[cons_key][1]))/2}) + constraints[cons_key][1])) / 2}) return initial_params diff --git a/hnn_core/tests/test_optimization.py b/hnn_core/tests/test_optimize_evoked.py similarity index 95% rename from hnn_core/tests/test_optimization.py rename to hnn_core/tests/test_optimize_evoked.py index 219570dee..c19363209 100644 --- a/hnn_core/tests/test_optimization.py +++ b/hnn_core/tests/test_optimize_evoked.py @@ -6,9 +6,11 @@ import hnn_core from hnn_core import read_params, jones_2009_model, simulate_dipole -from hnn_core.optimization import (_consolidate_chunks, _split_by_evinput, - _generate_weights, _get_drive_params, - optimize_evoked) +from hnn_core.optimization.optimize_evoked import (_consolidate_chunks, + _split_by_evinput, + _generate_weights, + _get_drive_params, + optimize_evoked) def test_consolidate_chunks(): From 54d051f28d73c3b7b997c38519e7ee1342bfe842 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 17 Aug 2023 15:28:39 -0400 Subject: [PATCH 35/63] doc attributes, expose max_iter, update_example --- hnn_core/optimization/general_optimization.py | 30 ++++++++++++++++++- hnn_core/tests/test_general_optimization.py | 2 +- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index b454f8632..35e58dfb7 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -16,7 +16,7 @@ class Optimizer: def __init__(self, net, tstop, constraints, set_params, solver='bayesian', obj_fun='dipole_rmse', scale_factor=1., - smooth_window_len=None): + smooth_window_len=None, max_iter=200): """Parameter optimization. Parameters @@ -37,6 +37,34 @@ def __init__(self, net, tstop, constraints, set_params, solver='bayesian', The dipole scale factor. The default is 1. smooth_window_len : float, optional The smooth window length. The default is None. + max_iter : int, optional + The max number of calls to the objective function. The default is + 200. + + Attributes + ---------- + net : Network + The network object. + constraints : dict + The user-defined constraints. + max_iter : int + The max number of calls to the objective function. + solver : string + The optimizer, 'bayesian' or 'cobyla'. + obj_fun : func + The objective function to be minimized. + scale_factor : float + The dipole scale factor. + smooth_window_len : float + The smooth window length. + tstop : float + The simulated dipole's duration. + net_ : Network + The network object with optimized drives. + obj_ : list + The objective function values. 
+ opt_params_ : list
+ The list of optimized parameter values.
 """
 
 if net.external_drives:
diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py
index e3dd3beb4..b7c9ff419 100644
--- a/hnn_core/tests/test_general_optimization.py
+++ b/hnn_core/tests/test_general_optimization.py
@@ -64,7 +64,7 @@ def set_params(net_offset, params):
 
 optim = Optimizer(net_offset, tstop=tstop, constraints=constraints,
 set_params=set_params, solver=solver,
- obj_fun='dipole_rmse')
+ obj_fun='dipole_rmse', max_iter=11)
 
 # test repr before fitting
 assert 'fit=False' in repr(optim), "optimizer is already fit"

From b7877f3d6ffbfc5209657152e3ec26a04f259197 Mon Sep 17 00:00:00 2001
From: Carolina Fernandez
Date: Mon, 21 Aug 2023 12:23:41 -0400
Subject: [PATCH 36/63] update repr, attributes, and misspelling in example

---
 examples/howto/optimize_evoked.py | 2 +-
 hnn_core/optimization/general_optimization.py | 10 ++++------
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py
index b159cb350..2b2f80455 100644
--- a/examples/howto/optimize_evoked.py
+++ b/examples/howto/optimize_evoked.py
@@ -103,7 +103,7 @@ def set_params(net, params):
 synaptic_delays=synaptic_delays_p)
 
 ###############################################################################
-# Then, we define the constrainst.
+# Then, we define the constraints.
 #
 # The constraints must be a dictionary of tuples where the first value in each
 # tuple is the lower bound and the second value is the upper bound for the
diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py
index 35e58dfb7..a9b22abcf 100644
--- a/hnn_core/optimization/general_optimization.py
+++ b/hnn_core/optimization/general_optimization.py
@@ -49,8 +49,8 @@ def __init__(self, net, tstop, constraints, set_params, solver='bayesian',
 The user-defined constraints.
 max_iter : int
 The max number of calls to the objective function.
- solver : string
- The optimizer, 'bayesian' or 'cobyla'.
+ solver : func
+ The optimization function.
 obj_fun : func
 The objective function to be minimized.
 scale_factor : float
@@ -103,10 +103,8 @@ def __repr__(self):
 if self.net_ is not None:
 is_fit = True
 
- return "<{}\nsolver={}\nfit={}>".format(
- self.__class__.__name__,
- self.solver,
- is_fit)
+ name = self.__class__.__name__
+ return f"<{name}\nsolver={self.solver}\nfit={is_fit}>"

From 8a9117393f28ae6ece94c842205c15bab19b41ea Mon Sep 17 00:00:00 2001
From: Carolina Fernandez
Date: Tue, 22 Aug 2023 15:29:09 -0400
Subject: [PATCH 37/63] update example, update relative imports, nest
 gp_minimize import, change opt default

---
 examples/howto/optimize_evoked.py | 2 +-
 hnn_core/optimization/general_optimization.py | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py
index 2b2f80455..3cdb20b12 100644
--- a/examples/howto/optimize_evoked.py
+++ b/examples/howto/optimize_evoked.py
@@ -110,7 +110,7 @@ def set_params(net, params):
 # corresponding parameter.
 #
 # The following synaptic weight parameter ranges (units of micro-siemens)
-# were chosen given that they are physiologically realistic.
+# were chosen so as to keep the model in physiologically realistic regimes. 
constraints = dict({'evprox1_ampa_L2_basket': (0.01, 1.), diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index a9b22abcf..237224d95 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -9,12 +9,9 @@ from .metrics import _rmse_evoked -from skopt import gp_minimize -from scipy.optimize import fmin_cobyla - class Optimizer: - def __init__(self, net, tstop, constraints, set_params, solver='bayesian', + def __init__(self, net, tstop, constraints, set_params, solver='cobyla', obj_fun='dipole_rmse', scale_factor=1., smooth_window_len=None, max_iter=200): """Parameter optimization. @@ -299,6 +296,8 @@ def _run_opt_bayesian(net, tstop, constraints, set_params, obj_fun, Optimized network object. """ + from skopt import gp_minimize + obj_values = list() def _obj_func(predicted_params): @@ -371,6 +370,8 @@ def _run_opt_cobyla(net, tstop, constraints, set_params, obj_fun, Optimized network object. """ + from scipy.optimize import fmin_cobyla + obj_values = list() def _obj_func(predicted_params): From 489d169f1b9bc1c244f632667ce50716302738af Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 24 Aug 2023 00:14:35 -0400 Subject: [PATCH 38/63] address naming conv comments, rename metrics.py, add function signature of set_params --- hnn_core/optimization/general_optimization.py | 47 ++++++++++--------- .../{metrics.py => objective_functions.py} | 8 ++-- 2 files changed, 29 insertions(+), 26 deletions(-) rename hnn_core/optimization/{metrics.py => objective_functions.py} (90%) diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index 237224d95..2cde7e92d 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -7,18 +7,18 @@ import numpy as np -from .metrics import _rmse_evoked +from .objective_functions import _rmse_evoked class Optimizer: - def __init__(self, net, tstop, constraints, set_params, solver='cobyla', - obj_fun='dipole_rmse', scale_factor=1., + def __init__(self, initial_net, tstop, constraints, set_params, + solver='cobyla', obj_fun='dipole_rmse', scale_factor=1., smooth_window_len=None, max_iter=200): """Parameter optimization. Parameters ---------- - net : Network + initial_net : instance of Network The network object. tstop : float The simulated dipole's duration. @@ -26,9 +26,14 @@ def __init__(self, net, tstop, constraints, set_params, solver='cobyla', The user-defined constraints. set_params : func User-defined function that sets parameters in network drives. - solver : string + + ``set_params(net, params) -> None`` + + where ``net`` is a Network object and ``params`` is a dictionary + of the parameters that will be set inside the function. + solver : str The optimizer, 'bayesian' or 'cobyla'. - obj_fun : string + obj_fun : str The objective function to be minimized. scale_factor : float, optional The dipole scale factor. The default is 1. @@ -40,8 +45,6 @@ def __init__(self, net, tstop, constraints, set_params, solver='cobyla', Attributes ---------- - net : Network - The network object. constraints : dict The user-defined constraints. max_iter : int @@ -56,7 +59,7 @@ def __init__(self, net, tstop, constraints, set_params, solver='cobyla', The smooth window length. tstop : float The simulated dipole's duration. - net_ : Network + net_ : instance of Network The network object with optimized drives. obj_ : list The objective function values. 
@@ -64,11 +67,11 @@ def __init__(self, net, tstop, constraints, set_params, solver='cobyla', The list of optimized parameter values. """ - if net.external_drives: + if initial_net.external_drives: raise ValueError("The current Network instance has external " + "drives, provide a Network object with no " + "drives.") - self.net = net + self._initial_net = initial_net self.constraints = constraints self._set_params = set_params self.max_iter = 200 @@ -115,7 +118,7 @@ def fit(self, target): constraints = self._assemble_constraints(self.constraints) initial_params = _get_initial_params(self.constraints) - opt_params, obj, net_ = self._run_opt(self.net, + opt_params, obj, net_ = self._run_opt(self._initial_net, self.tstop, constraints, self._set_params, @@ -258,14 +261,14 @@ def _update_params(initial_params, predicted_params): return params -def _run_opt_bayesian(net, tstop, constraints, set_params, obj_fun, +def _run_opt_bayesian(initial_net, tstop, constraints, set_params, obj_fun, initial_params, max_iter, target, scale_factor=1., smooth_window_len=None): """Runs optimization routine with gp_minimize optimizer. Parameters ---------- - net : Network + initial_net : instance of Network The network object. tstop : float The simulated dipole's duration. @@ -292,7 +295,7 @@ def _run_opt_bayesian(net, tstop, constraints, set_params, obj_fun, Optimized parameters. obj : list Objective values. - net_ : Network + net_ : instance of Network Optimized network object. """ @@ -301,7 +304,7 @@ def _run_opt_bayesian(net, tstop, constraints, set_params, obj_fun, obj_values = list() def _obj_func(predicted_params): - return obj_fun(net, + return obj_fun(initial_net, initial_params, set_params, predicted_params, @@ -326,20 +329,20 @@ def _obj_func(predicted_params): # get optimized net params = _update_params(initial_params, opt_params) - net_ = net.copy() + net_ = initial_net.copy() set_params(net_, params) return opt_params, obj, net_ -def _run_opt_cobyla(net, tstop, constraints, set_params, obj_fun, +def _run_opt_cobyla(initial_net, tstop, constraints, set_params, obj_fun, initial_params, max_iter, target, scale_factor=1., smooth_window_len=None): """Runs optimization routine with fmin_cobyla optimizer. Parameters ---------- - net : Network + initial_net : instance of Network The network object. tstop : float The simulated dipole's duration. @@ -366,7 +369,7 @@ def _run_opt_cobyla(net, tstop, constraints, set_params, obj_fun, Optimized parameters. obj : list Objective values. - net_ : Network + net_ : instance of Network Optimized network object. 
""" @@ -375,7 +378,7 @@ def _run_opt_cobyla(net, tstop, constraints, set_params, obj_fun, obj_values = list() def _obj_func(predicted_params): - return obj_fun(net, + return obj_fun(initial_net, initial_params, set_params, predicted_params, @@ -402,7 +405,7 @@ def _obj_func(predicted_params): # get optimized net params = _update_params(initial_params, opt_params) - net_ = net.copy() + net_ = initial_net.copy() set_params(net_, params) return opt_params, obj, net_ diff --git a/hnn_core/optimization/metrics.py b/hnn_core/optimization/objective_functions.py similarity index 90% rename from hnn_core/optimization/metrics.py rename to hnn_core/optimization/objective_functions.py index dc6b487b4..60f85a9f5 100644 --- a/hnn_core/optimization/metrics.py +++ b/hnn_core/optimization/objective_functions.py @@ -1,4 +1,4 @@ -"""Metrics for parameter optimization.""" +"""Objective functions for parameter optimization.""" # Authors: Carolina Fernandez # Nick Tolley @@ -12,14 +12,14 @@ from scipy.signal import resample -def _rmse_evoked(net, initial_params, set_params, predicted_params, +def _rmse_evoked(initial_net, initial_params, set_params, predicted_params, update_params, obj_values, scale_factor, smooth_window_len, target, tstop): """The objective function for evoked responses. Parameters ---------- - net : Network + initial_net : instance of Network The network object. initial_params : dict Keys are parameter names, values are initial parameters. @@ -47,7 +47,7 @@ def _rmse_evoked(net, initial_params, set_params, predicted_params, params = update_params(initial_params, predicted_params) # simulate dpl with predicted params - new_net = net.copy() + new_net = initial_net.copy() set_params(new_net, params) dpl = simulate_dipole(new_net, tstop=tstop, n_trials=1)[0] From 82e28c713dbe48190bb420709f5c1674c135c809 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 24 Aug 2023 00:44:27 -0400 Subject: [PATCH 39/63] update imports in optimization/optimize_evoked.py --- hnn_core/optimization/optimize_evoked.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hnn_core/optimization/optimize_evoked.py b/hnn_core/optimization/optimize_evoked.py index 436af95c1..13f8f07e4 100644 --- a/hnn_core/optimization/optimize_evoked.py +++ b/hnn_core/optimization/optimize_evoked.py @@ -12,8 +12,8 @@ import scipy.stats as stats from scipy.optimize import fmin_cobyla -from .dipole import simulate_dipole, _rmse, average_dipoles -from .network import pick_connection +from ..dipole import simulate_dipole, _rmse, average_dipoles +from ..network import pick_connection def _get_range(val, multiplier): From 27db0acda60dfd7ef71c8bbb69e767b01be7b731 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Thu, 24 Aug 2023 00:27:01 -0400 Subject: [PATCH 40/63] Update unit_tests.yml Add scikit-optimize as dependency --- .github/workflows/unit_tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index c4db7f06f..5ae07e81d 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -35,6 +35,7 @@ jobs: python -m pip install --upgrade pip pip install flake8 pytest pytest-cov pip install psutil joblib + pip install scikit-optimize if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then pip install mpi4py else @@ -55,4 +56,4 @@ jobs: - name: Upload code coverage shell: bash -el {0} run: | - bash <(curl -s https://codecov.io/bash) -f ./coverage.xml 
\ No newline at end of file + bash <(curl -s https://codecov.io/bash) -f ./coverage.xml From cd56857f9d3d8e6e7045316e2ff933b45fd9b228 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Thu, 24 Aug 2023 00:28:03 -0400 Subject: [PATCH 41/63] Update windows_unit_tests.yml Add scikit-optimize as dependency --- .github/workflows/windows_unit_tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/windows_unit_tests.yml b/.github/workflows/windows_unit_tests.yml index 64a870bed..531ba3c2e 100644 --- a/.github/workflows/windows_unit_tests.yml +++ b/.github/workflows/windows_unit_tests.yml @@ -41,6 +41,7 @@ jobs: pip install flake8 pytest pytest-cov pip install psutil joblib pip install --verbose .[gui] + pip install scikit-optimize - name: Test with pytest shell: cmd run: | From 7aa7be1d51e27b7832e5806016a29227363da9b5 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 24 Aug 2023 13:15:08 -0400 Subject: [PATCH 42/63] test exception raised with pytest.raises --- hnn_core/optimization/general_optimization.py | 2 +- hnn_core/tests/test_general_optimization.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index 2cde7e92d..d6d61ff89 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -70,7 +70,7 @@ def __init__(self, initial_net, tstop, constraints, set_params, if initial_net.external_drives: raise ValueError("The current Network instance has external " + "drives, provide a Network object with no " + - "drives.") + "external drives.") self._initial_net = initial_net self.constraints = constraints self._set_params = set_params diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py index b7c9ff419..9926a6d27 100644 --- a/hnn_core/tests/test_general_optimization.py +++ b/hnn_core/tests/test_general_optimization.py @@ -66,6 +66,15 @@ def set_params(net_offset, params): set_params=set_params, solver=solver, obj_fun='dipole_rmse', max_iter=11) + # test exception raised + with pytest.raises(ValueError, match='The current Network instance has ' + 'external drives, provide a Network object with no ' + 'external drives.'): + net_with_drives = net_orig.copy() + optim = Optimizer(net_with_drives, tstop=tstop, + constraints=constraints, set_params=set_params, + solver=solver, obj_fun='dipole_rmse', max_iter=11) + # test repr before fitting assert 'fit=False' in repr(optim), "optimizer is already fit" From ac9be58d409e6882d67950f60754ca4f8d41aabe Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 24 Aug 2023 15:39:30 -0400 Subject: [PATCH 43/63] add bayesopt.py --- hnn_core/externals/bayesopt.py | 80 ++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 hnn_core/externals/bayesopt.py diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py new file mode 100644 index 000000000..f310b589d --- /dev/null +++ b/hnn_core/externals/bayesopt.py @@ -0,0 +1,80 @@ +"""Bayesian optimization according to: + +Brochu, Cora, and de Freitas' tutorial at +http://haikufactory.com/files/bayopt.pdf + +Adopted from http://atpassos.me/post/44900091837/bayesian-optimization +""" + +# Authors: Alexandre Gramfort +# Alexandre Passos +# Mainak Jas + +import warnings +from sklearn import gaussian_process +import numpy as np + +import scipy.stats as st + + +def 
expected_improvement(gp, best_y, x):
+ """The expected improvement acquisition function.
+
+ The equation is explained in Eq (3) of the tutorial."""
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ y, y_std = gp.predict(x[:, None], return_std=True)
+ Z = (y - best_y) / (y_std + 1e-12)
+ return (y - best_y) * st.norm.cdf(Z) + y_std * st.norm.pdf(Z)
+
+
+def bayes_opt(f, initial_x, all_x, acquisition, max_iter=100, debug=False,
+ random_state=None):
+ """The actual bayesian optimization function.
+
+ f is the very expensive function we want to minimize.
+
+ initial_x is a matrix of at least two data points (preferably
+ more, randomly sampled).
+
+ acquisition is the acquisition function we want to use to find
+ query points."""
+
+ X, y = list(), list()
+ for x in initial_x:
+ if not np.isinf(f(x)):
+ y.append(f(x))
+ X.append(x)
+
+ best_x = X[np.argmin(y)]
+ best_f = y[np.argmin(y)]
+ gp = gaussian_process.GaussianProcessRegressor(random_state=random_state)
+
+ if debug: # pragma: no cover
+ print("iter", -1, "best_x", best_x, best_f)
+
+ for i in range(max_iter):
+ gp.fit(np.array(X)[:, None], np.array(y))
+
+ new_x = all_x[acquisition(gp, best_f, all_x).argmin()]
+ new_f = f(new_x)
+ if not np.isinf(new_f):
+ X.append(new_x)
+ y.append(new_f)
+ if new_f < best_f:
+ best_f = new_f
+ best_x = new_x
+
+ if debug: # pragma: no cover
+ print("iter", i, "best_x", best_x, best_f)
+
+ if debug: # pragma: no cover
+ import matplotlib.pyplot as plt
+ scale = 1e6
+ sort_idx = np.argsort(X)
+ plt.plot(np.array(X)[sort_idx] * scale,
+ np.array(y)[sort_idx] * scale, 'bo-')
+ plt.axvline(best_x * scale, linestyle='--')
+ plt.show()
+
+ return best_x, best_f
\ No newline at end of file
From e07afc57a94a6f523ada989168574b065d2c297f Mon Sep 17 00:00:00 2001
From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com>
Date: Thu, 24 Aug 2023 15:46:14 -0400
Subject: [PATCH 44/63] Update unit_tests.yml

add scikit-learn dependency
---
 .github/workflows/unit_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 5ae07e81d..6a47d7b61 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -35,7 +35,7 @@ jobs:
 python -m pip install --upgrade pip
 pip install flake8 pytest pytest-cov
 pip install psutil joblib
- pip install scikit-optimize
+ pip install scikit-learn
 if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then
 pip install mpi4py
 else

From 31939e5bf0eef4fb91d7cc674b41076333703a24 Mon Sep 17 00:00:00 2001
From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com>
Date: Thu, 24 Aug 2023 15:46:42 -0400
Subject: [PATCH 45/63] Update windows_unit_tests.yml

add scikit-learn dependency
---
 .github/workflows/windows_unit_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/windows_unit_tests.yml b/.github/workflows/windows_unit_tests.yml
index 531ba3c2e..9d4a7ad54 100644
--- a/.github/workflows/windows_unit_tests.yml
+++ b/.github/workflows/windows_unit_tests.yml
@@ -41,7 +41,7 @@ jobs:
 pip install flake8 pytest pytest-cov
 pip install psutil joblib
 pip install --verbose .[gui]
- pip install scikit-optimize
+ pip install scikit-learn
- name: Test with pytest
 shell: cmd
 run: |

From 7423b255dd4bcb9d8f393b1d548b8a5adef8f5e7 Mon Sep 17 00:00:00 2001
From: Carolina Fernandez
Date: Fri, 25 Aug 2023 14:00:51 -0400
Subject: [PATCH 46/63] adapt bayesian optimization functions
--- 
hnn_core/externals/bayesopt.py | 90 ++++++++++++-------
 hnn_core/optimization/general_optimization.py | 17 ++--
 hnn_core/tests/test_general_optimization.py | 31 ++++---
 3 files changed, 86 insertions(+), 52 deletions(-)

diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py
index f310b589d..f2b0928c0 100644
--- a/hnn_core/externals/bayesopt.py
+++ b/hnn_core/externals/bayesopt.py
@@ -9,6 +9,7 @@
 # Authors: Alexandre Gramfort
 # Alexandre Passos
 # Mainak Jas
+# Carolina Fernandez
 
 import warnings
 from sklearn import gaussian_process
@@ -18,46 +19,83 @@
 
 def expected_improvement(gp, best_y, x):
- """The expected improvement acquisition function.
+ """The expected improvement acquisition function. The equation is
+ explained in Eq (3) of the tutorial.
+
+ Parameters
+ ----------
+ gp : instance of GaussianProcessRegressor
+ The GaussianProcessRegressor object.
+ best_y : float
+ Best objective value.
+ x : ndarray
+ Randomly distributed samples.
+
+ Returns
+ -------
+ ndarray
+ Expected improvement values corresponding to x.
 
- The equation is explained in Eq (3) of the tutorial."""
 with warnings.catch_warnings():
 warnings.simplefilter("ignore")
- y, y_std = gp.predict(x[:, None], return_std=True)
+ y, y_std = gp.predict(x, return_std=True) # (n_samples, n_features)
 Z = (y - best_y) / (y_std + 1e-12)
 return (y - best_y) * st.norm.cdf(Z) + y_std * st.norm.pdf(Z)
 
-def bayes_opt(f, initial_x, all_x, acquisition, max_iter=100, debug=False,
+def bayes_opt(f, initial_x, all_x, acquisition, max_iter=200,
 random_state=None):
 """The actual bayesian optimization function.
 
- f is the very expensive function we want to minimize.
-
- initial_x is a matrix of at least two data points (preferably
- more, randomly sampled).
-
- acquisition is the acquisition function we want to use to find
- query points."""
+ Parameters
+ ----------
+ f : func
+ The objective functions.
+ initial_x : list
+ Initial parameters.
+ all_x : list of tuples
+ Parameter constraints in solver-specific format.
+ acquisition : func
+ Acquisition function we want to use to find query points.
+ max_iter : int, optional
+ Number of calls the optimizer makes. The default is 200.
+ random_state : int, optional
+ Random state of the gaussian process. The default is None.
+
+ Returns
+ -------
+ best_x : list
+ Optimized parameters.
+ best_f : float
+ Best objective value. 
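+
+ Examples
+ --------
+ A minimal sketch with a toy quadratic objective (illustrative only;
+ the bounds and starting point are arbitrary):
+
+ >>> best_x, best_f = bayes_opt(lambda p: (p[0] - 0.3) ** 2,
+ ... [0.5], [(0., 1.)],
+ ... expected_improvement, max_iter=50)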
+ + """ X, y = list(), list() - for x in initial_x: - if not np.isinf(f(x)): - y.append(f(x)) - X.append(x) + + # evaluate + y.append(f(initial_x)) + X.append(initial_x) best_x = X[np.argmin(y)] best_f = y[np.argmin(y)] gp = gaussian_process.GaussianProcessRegressor(random_state=random_state) - if debug: # pragma: no cover - print("iter", -1, "best_x", best_x, best_f) + # draw samples from distribution + all_x = np.random.uniform(low=[idx[0] for idx in all_x], + high=[idx[1] for idx in all_x], + size=(10000, len(all_x))) for i in range(max_iter): - gp.fit(np.array(X)[:, None], np.array(y)) + gp.fit(np.array(X), np.array(y)) # (n_samples, n_features) + + new_x = all_x[acquisition(gp, best_f, all_x).argmin()] # lowest obj + new_x = new_x.tolist() - new_x = all_x[acquisition(gp, best_f, all_x).argmin()] + # evaluate new_f = f(new_x) + if not np.isinf(new_f): X.append(new_x) y.append(new_f) @@ -65,16 +103,4 @@ def bayes_opt(f, initial_x, all_x, acquisition, max_iter=100, debug=False, best_f = new_f best_x = new_x - if debug: # pragma: no cover - print("iter", i, "best_x", best_x, best_f) - - if debug: # pragma: no cover - import matplotlib.pyplot as plt - scale = 1e6 - sort_idx = np.argsort(X) - plt.plot(np.array(X)[sort_idx] * scale, - np.array(y)[sort_idx] * scale, 'bo-') - plt.axvline(best_x * scale, linestyle='--') - plt.show() - - return best_x, best_f \ No newline at end of file + return best_x, best_f diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index d6d61ff89..091305656 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -74,7 +74,7 @@ def __init__(self, initial_net, tstop, constraints, set_params, self._initial_net = initial_net self.constraints = constraints self._set_params = set_params - self.max_iter = 200 + self.max_iter = max_iter # Optimizer method if solver == 'bayesian': self.solver = 'bayesian' @@ -299,7 +299,8 @@ def _run_opt_bayesian(initial_net, tstop, constraints, set_params, obj_fun, Optimized network object. """ - from skopt import gp_minimize + # from skopt import gp_minimize + from ..externals.bayesopt import bayes_opt, expected_improvement obj_values = list() @@ -315,14 +316,14 @@ def _obj_func(predicted_params): target, tstop) - opt_results = gp_minimize(func=_obj_func, - dimensions=constraints, - acq_func='EI', - n_calls=max_iter, - x0=list(initial_params.values())) + opt_results, _ = bayes_opt(f=_obj_func, + initial_x=list(initial_params.values()), + all_x=constraints, + acquisition=expected_improvement, + max_iter=max_iter) # get optimized params - opt_params = opt_results.x + opt_params = opt_results # get objective values obj = [np.min(obj_values[:idx]) for idx in range(1, max_iter + 1)] diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py index 9926a6d27..547e36936 100644 --- a/hnn_core/tests/test_general_optimization.py +++ b/hnn_core/tests/test_general_optimization.py @@ -14,6 +14,7 @@ def test_optimize_evoked(solver): """Test optimization routines for evoked drives in a reduced network.""" + max_iter = 11 tstop = 10. 
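 # keep the simulated window short so this optimization test runs quickly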
n_trials = 1
@@ -51,29 +52,33 @@ def set_params(net_offset, params):
 'L5_basket': 1., 'L5_pyramidal': 1.}
 net_offset.add_evoked_drive('evprox',
 mu=params['mu'],
- sigma=1,
+ sigma=params['sigma'],
 numspikes=1,
 location='proximal',
 weights_ampa=weights_ampa,
 synaptic_delays=synaptic_delays)
 
 # define constraints
- mu_range = (1, 6)
 constraints = dict()
- constraints.update({'mu': mu_range})
+ constraints.update({'mu': (1, 6),
+ 'sigma': (1, 3)})
 
 optim = Optimizer(net_offset, tstop=tstop, constraints=constraints,
 set_params=set_params, solver=solver,
- obj_fun='dipole_rmse', max_iter=11)
+ obj_fun='dipole_rmse', max_iter=max_iter)
 
 # test exception raised
 with pytest.raises(ValueError, match='The current Network instance has '
 'external drives, provide a Network object with no '
 'external drives.'):
 net_with_drives = net_orig.copy()
- optim = Optimizer(net_with_drives, tstop=tstop,
- constraints=constraints, set_params=set_params,
- solver=solver, obj_fun='dipole_rmse', max_iter=11)
+ optim = Optimizer(net_with_drives,
+ tstop=tstop,
+ constraints=constraints,
+ set_params=set_params,
+ solver=solver,
+ obj_fun='dipole_rmse',
+ max_iter=max_iter)
 
 # test repr before fitting
 assert 'fit=False' in repr(optim), "optimizer is already fit"
@@ -83,14 +88,16 @@ def set_params(net_offset, params):
 # test repr after fitting
 assert 'fit=True' in repr(optim), "optimizer was not fit"
 
- opt_param = optim.opt_params_[0]
 # the optimized parameter is in the range
- assert mu_range[0] <= opt_param <= mu_range[1], \
- "Optimized parameter is not in user-defined range"
+ for param_idx, param in enumerate(optim.opt_params_):
+ assert (list(constraints.values())[param_idx][0]
+ <= param
+ <= list(constraints.values())[param_idx][1]), \
+ "Optimized parameter is not in user-defined range"
 
 obj = optim.obj_
 # the number of returned rmse values should be the same as max_iter
- assert len(obj) <= 200, \
- "Number of rmse values should be the same as max_iter"
+ assert len(obj) <= max_iter, \
+ "Number of rmse values should be the same as max_iter"
 # the returned rmse values should be positive
 assert all(vals >= 0 for vals in obj), "rmse values should be positive"

From 4a362fafb8e03f2469feb4aac90a10fe8265231f Mon Sep 17 00:00:00 2001
From: Carolina Fernandez
Date: Fri, 25 Aug 2023 15:52:17 -0400
Subject: [PATCH 47/63] update bayesian optimizer, fix reduced net, add
 dependency
---
 hnn_core/externals/bayesopt.py | 95 +++++++++++++------
 hnn_core/optimization/__init__.py | 2 +-
 hnn_core/optimization/general_optimization.py | 8 +-
 hnn_core/optimization/objective_functions.py | 4 +-
 hnn_core/tests/test_general_optimization.py | 23 +++--
 setup.py | 3 +-
 6 files changed, 90 insertions(+), 45 deletions(-)

diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py
index f2b0928c0..31ee8d712 100644
--- a/hnn_core/externals/bayesopt.py
+++ b/hnn_core/externals/bayesopt.py
@@ -18,7 +18,7 @@
 import scipy.stats as st
 
-def expected_improvement(gp, best_y, x):
+def expected_improvement(gp, best_f, all_x):
 """The expected improvement acquisition function. The equation is
 explained in Eq (3) of the tutorial.
 
@@ -26,75 +26,85 @@ def expected_improvement(gp, best_y, x):
 ----------
 gp : instance of GaussianProcessRegressor
 The GaussianProcessRegressor object.
- best_y : float
+ best_f : float
 Best objective value.
- x : ndarray
+ all_x : ndarray
 Randomly distributed samples.
 
 Returns
 -------
 ndarray
- Expected improvement values corresponding to x.
+ Expected improvement values corresponding to all_x. 
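+
+ Notes
+ -----
+ A sketch of the computation below: with GP predictive mean ``y`` and
+ standard deviation ``y_std``, ``Z = (y - best_f) / (y_std + 1e-12)``
+ and the returned value is
+ ``(y - best_f) * norm.cdf(Z) + y_std * norm.pdf(Z)``, i.e. Eq (3) of
+ the tutorial cited in the module docstring.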
""" with warnings.catch_warnings(): warnings.simplefilter("ignore") - y, y_std = gp.predict(x, return_std=True) # (n_samples, n_features) - Z = (y - best_y) / (y_std + 1e-12) - return (y - best_y) * st.norm.cdf(Z) + y_std * st.norm.pdf(Z) + # (n_samples, n_features) + y, y_std = gp.predict(all_x, return_std=True) + Z = (y - best_f) / (y_std + 1e-12) + return (y - best_f) * st.norm.cdf(Z) + y_std * st.norm.pdf(Z) -def bayes_opt(f, initial_x, all_x, acquisition, max_iter=200, - random_state=None): +def bayes_opt(func, x0, cons, acquisition, maxfun=200, + debug=False, random_state=None): """The actual bayesian optimization function. Parameters ---------- - f : func - The objective functions. - initial_x : list + func : func + The objective function. + x0 : list Initial parameters. - all_x : list of tuples + cons : list of tuples Parameter constraints in solver-specific format. acquisition : func Acquisiton function we want to use to find query points. - max_iter : int, optional - Number of calls the optimizer makes. The default is 200. + maxfun : int, optional + Maximum number of function evaluations. The default is 200. + debug : bool, optional + The default is False. random_state : int, optional - Random state of the gaussian process. The default is None. + Random state of the GaussianProcessRegressor. The default is None. Returns ------- best_x : list - Optimized parameters. + The argument that minimizes f. best_f : float - Best objective value. - + The final objective value. """ X, y = list(), list() # evaluate - y.append(f(initial_x)) - X.append(initial_x) + initial_f = func(x0) + if not np.isinf(initial_f): + X.append(x0) + y.append(initial_f) best_x = X[np.argmin(y)] best_f = y[np.argmin(y)] gp = gaussian_process.GaussianProcessRegressor(random_state=random_state) - # draw samples from distribution - all_x = np.random.uniform(low=[idx[0] for idx in all_x], - high=[idx[1] for idx in all_x], - size=(10000, len(all_x))) + if debug: + print("iter", -1, "best_x", best_x, best_f) - for i in range(max_iter): - gp.fit(np.array(X), np.array(y)) # (n_samples, n_features) + for i in range(maxfun): + # draw samples from distribution + all_x = np.random.uniform(low=[idx[0] for idx in cons], + high=[idx[1] for idx in cons], + size=(10000, len(cons))) + + # (n_samples, n_features) + gp.fit(np.array(X), np.array(y)) + + # get new set of params new_x = all_x[acquisition(gp, best_f, all_x).argmin()] # lowest obj new_x = new_x.tolist() # evaluate - new_f = f(new_x) + new_f = func(new_x) if not np.isinf(new_f): X.append(new_x) @@ -103,4 +113,33 @@ def bayes_opt(f, initial_x, all_x, acquisition, max_iter=200, best_f = new_f best_x = new_x + if debug: + print("iter", i, "best_x", best_x, best_f) + + if debug: + import matplotlib.pyplot as plt + scale = 1e6 + sort_idx = np.argsort(X) + plt.plot(np.array(X)[sort_idx] * scale, + np.array(y)[sort_idx] * scale, 'bo-') + plt.axvline(best_x * scale, linestyle='--') + plt.show() + return best_x, best_f + + +if __name__ == '__main__': + import numpy as np + from scipy.optimize import rosen + + opt_params, obj_vals = bayes_opt(rosen, + [0.5, 0.6], [(-1, 1), (-1, 1)], + expected_improvement, + maxfun=200, + random_state=1) + + x = np.linspace(-1, 1, 50) + y = np.linspace(-1, 1, 50) + X, Y = np.meshgrid(x, y) + + objs = rosen(np.vstack((X.ravel(), Y.ravel()))) diff --git a/hnn_core/optimization/__init__.py b/hnn_core/optimization/__init__.py index 37ddae366..23b9c06f2 100644 --- a/hnn_core/optimization/__init__.py +++ b/hnn_core/optimization/__init__.py @@ -1 +1 @@ 
-from .optimize_evoked import optimize_evoked
-from .general_optimization import Optimizer
\ No newline at end of file
+from .optimize_evoked import optimize_evoked
+from .general_optimization import Optimizer
\ No newline at end of file
diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py
index 091305656..dbb59a21a 100644
--- a/hnn_core/optimization/general_optimization.py
+++ b/hnn_core/optimization/general_optimization.py
@@ -316,11 +316,11 @@ def _obj_func(predicted_params):
                               target,
                               tstop)

-    opt_results, _ = bayes_opt(f=_obj_func,
-                               initial_x=list(initial_params.values()),
-                               all_x=constraints,
+    opt_results, _ = bayes_opt(func=_obj_func,
+                               x0=list(initial_params.values()),
+                               cons=constraints,
                                acquisition=expected_improvement,
-                               max_iter=max_iter)
+                               maxfun=max_iter)

     # get optimized params
     opt_params = opt_results
diff --git a/hnn_core/optimization/objective_functions.py b/hnn_core/optimization/objective_functions.py
index 60f85a9f5..ee8ae1ef0 100644
--- a/hnn_core/optimization/objective_functions.py
+++ b/hnn_core/optimization/objective_functions.py
@@ -7,10 +7,10 @@

 import numpy as np

-from hnn_core import simulate_dipole
-
 from scipy.signal import resample

+from hnn_core import simulate_dipole
+

 def _rmse_evoked(initial_net, initial_params, set_params, predicted_params,
                  update_params, obj_values, scale_factor, smooth_window_len,
diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py
index 547e36936..da318927d 100644
--- a/hnn_core/tests/test_general_optimization.py
+++ b/hnn_core/tests/test_general_optimization.py
@@ -3,12 +3,14 @@
 # Ryan Thorpe
 # Mainak Jas

-from hnn_core import jones_2009_model, simulate_dipole
-
-from hnn_core.optimization import Optimizer
+import os.path as op

 import pytest

+import hnn_core
+from hnn_core import jones_2009_model, simulate_dipole, read_params
+from hnn_core.optimization import Optimizer
+

 @pytest.mark.parametrize("solver", ['bayesian', 'cobyla'])
 def test_optimize_evoked(solver):
@@ -19,9 +21,14 @@ def test_optimize_evoked(solver):
     n_trials = 1

     # simulate a dipole to establish ground-truth drive parameters
-    net_orig = jones_2009_model()
-    net_orig._N_pyr_x = 3
-    net_orig._N_pyr_y = 3
+    hnn_core_root = op.dirname(hnn_core.__file__)
+    params_fname = op.join(hnn_core_root, 'param', 'default.json')
+    params = read_params(params_fname)
+
+    params.update({'N_pyr_x': 3,
+                   'N_pyr_y': 3})
+    net_orig = jones_2009_model(params)
+
     mu_orig = 2.
weights_ampa = {'L2_basket': 0.5, 'L2_pyramidal': 0.5, @@ -39,9 +46,7 @@ def test_optimize_evoked(solver): dpl_orig = simulate_dipole(net_orig, tstop=tstop, n_trials=n_trials)[0] # define set_params function and constraints - net_offset = jones_2009_model() - net_offset._N_pyr_x = 3 - net_offset._N_pyr_y = 3 + net_offset = jones_2009_model(params) def set_params(net_offset, params): weights_ampa = {'L2_basket': 0.5, diff --git a/setup.py b/setup.py index 48c712e6f..b2c367d47 100644 --- a/setup.py +++ b/setup.py @@ -106,7 +106,8 @@ def run(self): 'h5io' ], extras_require={ - 'gui': ['ipywidgets <=7.7.1', 'ipympl<0.9', 'voila<=0.3.6'] + 'gui': ['ipywidgets <=7.7.1', 'ipympl<0.9', 'voila<=0.3.6'], + 'opt': ['scikit-learn'] }, python_requires='>=3.7', packages=find_packages(), From f9d3c07ae8724df8da01cc30b66a5d4a75726b03 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Fri, 25 Aug 2023 15:54:11 -0400 Subject: [PATCH 48/63] remove comment --- hnn_core/optimization/general_optimization.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index dbb59a21a..def8279af 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -299,7 +299,6 @@ def _run_opt_bayesian(initial_net, tstop, constraints, set_params, obj_fun, Optimized network object. """ - # from skopt import gp_minimize from ..externals.bayesopt import bayes_opt, expected_improvement obj_values = list() From 8a32774f19a6b037d388a310125099e4a4d61e80 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Mon, 28 Aug 2023 09:52:02 -0400 Subject: [PATCH 49/63] update email address, cleanup bayesopt.py --- examples/howto/optimize_evoked.py | 2 +- hnn_core/externals/bayesopt.py | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index 3cdb20b12..08164bbf4 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -132,7 +132,7 @@ def set_params(net, params): net = jones_2009_model() optim = Optimizer(net, tstop=tstop, constraints=constraints, set_params=set_params, scale_factor=scale_factor, - smooth_window_len=smooth_window_len) + smooth_window_len=smooth_window_len, solver='cobyla') with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): optim.fit(exp_dpl.data['agg']) diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py index 31ee8d712..1b8fe5550 100644 --- a/hnn_core/externals/bayesopt.py +++ b/hnn_core/externals/bayesopt.py @@ -8,7 +8,7 @@ # Authors: Alexandre Gramfort # Alexandre Passos -# Mainak Jas +# Mainak Jas # Carolina Fernandez import warnings @@ -78,9 +78,8 @@ def bayes_opt(func, x0, cons, acquisition, maxfun=200, # evaluate initial_f = func(x0) - if not np.isinf(initial_f): - X.append(x0) - y.append(initial_f) + X.append(x0) + y.append(initial_f) best_x = X[np.argmin(y)] best_f = y[np.argmin(y)] @@ -106,12 +105,11 @@ def bayes_opt(func, x0, cons, acquisition, maxfun=200, # evaluate new_f = func(new_x) - if not np.isinf(new_f): - X.append(new_x) - y.append(new_f) - if new_f < best_f: - best_f = new_f - best_x = new_x + X.append(new_x) + y.append(new_f) + if new_f < best_f: + best_f = new_f + best_x = new_x if debug: print("iter", i, "best_x", best_x, best_f) From 817f132c510c4a27aeb86e894b132eafa8530a7e Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Fri, 25 Aug 
2023 15:57:49 -0400 Subject: [PATCH 50/63] Update unit_tests.yml remove sklearn soft dependency --- .github/workflows/unit_tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 6a47d7b61..158dc7023 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -35,7 +35,6 @@ jobs: python -m pip install --upgrade pip pip install flake8 pytest pytest-cov pip install psutil joblib - pip install scikit-learn if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then pip install mpi4py else From 27cefc44e500a5e99971ca16e55a232cd83cac95 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Fri, 25 Aug 2023 15:58:29 -0400 Subject: [PATCH 51/63] Update windows_unit_tests.yml remove sklearn soft dependency --- .github/workflows/windows_unit_tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/windows_unit_tests.yml b/.github/workflows/windows_unit_tests.yml index 9d4a7ad54..64a870bed 100644 --- a/.github/workflows/windows_unit_tests.yml +++ b/.github/workflows/windows_unit_tests.yml @@ -41,7 +41,6 @@ jobs: pip install flake8 pytest pytest-cov pip install psutil joblib pip install --verbose .[gui] - pip install scikit-learn - name: Test with pytest shell: cmd run: | From f29330fdd837e425889c42f078482f1333df6609 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 15:29:52 -0400 Subject: [PATCH 52/63] update example --- examples/howto/optimize_evoked.py | 142 ++++++++++++++++++++++-------- hnn_core/externals/bayesopt.py | 4 +- 2 files changed, 108 insertions(+), 38 deletions(-) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index 08164bbf4..38e35d1df 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -42,6 +42,66 @@ urlretrieve(data_url, 'S1_SupraT.txt') exp_dpl = read_dipole('S1_SupraT.txt') +############################################################################### +# Let’s then simulate the dipole with some initial parameters. 
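+#
+# The drive parameters below (timing means, dispersions, and AMPA/NMDA
+# weights) are a hand-picked starting point; the optimization section further
+# down refines the second proximal drive while the first proximal and the
+# distal drive stay fixed at these values.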
+ +tstop = exp_dpl.times[-1] +scale_factor = 3000 +smooth_window_len = 30 + +net_init = jones_2009_model() + +# Proximal 1 +weights_ampa_p1 = {'L2_basket': 0.2913, 'L2_pyramidal': 0.9337, + 'L5_basket': 0.1951, 'L5_pyramidal': 0.3602} +weights_nmda_p1 = {'L2_basket': 0.9240, 'L2_pyramidal': 0.0845, + 'L5_basket': 0.5849, 'L5_pyramidal': 0.65105} +synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, + 'L5_basket': 1., 'L5_pyramidal': 1.} +net_init.add_evoked_drive('evprox1', + mu=5.6813, + sigma=20.3969, + numspikes=1, + location='proximal', + weights_ampa=weights_ampa_p1, + weights_nmda=weights_nmda_p1, + synaptic_delays=synaptic_delays_p) + +# Distal +weights_ampa_d1 = {'L2_basket': 0.8037, 'L2_pyramidal': 0.5738, + 'L5_pyramidal': 0.3626} +weights_nmda_d1 = {'L2_basket': 0.2492, 'L2_pyramidal': 0.6183, + 'L5_pyramidal': 0.1121} +synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, + 'L5_pyramidal': 0.1} +net_init.add_evoked_drive('evdist1', + mu=58.6539, + sigma=5.5810, + numspikes=1, + location='distal', + weights_ampa=weights_ampa_d1, + weights_nmda=weights_nmda_d1, + synaptic_delays=synaptic_delays_d1) + +# Proximal 2 +weights_ampa_p2 = {'L2_basket': 0.01, 'L2_pyramidal': 0.01, 'L5_basket': 0.01, + 'L5_pyramidal': 0.01} +weights_nmda_p2 = {'L2_basket': 0.01, 'L2_pyramidal': 0.01, 'L5_basket': 0.01, + 'L5_pyramidal': 0.01} +net_init.add_evoked_drive('evprox2', + mu=80, + sigma=1, + numspikes=1, + location='proximal', + weights_ampa=weights_ampa_p2, + weights_nmda=weights_nmda_p2, + synaptic_delays=synaptic_delays_p) + +with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): + init_dpl = simulate_dipole(net_init, tstop=tstop, n_trials=1)[0] +init_dpl.scale(scale_factor) +init_dpl.smooth(smooth_window_len) + ############################################################################### # Now we start the optimization! 
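 # With the Bayesian solver, each iteration fits a Gaussian-process surrogate
 # to the RMSE values collected so far and proposes the next candidate
 # parameters with the expected-improvement rule implemented in
 # ``hnn_core/externals/bayesopt.py``.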
# @@ -54,36 +114,31 @@ def set_params(net, params): # Proximal 1 - weights_ampa_p1 = {'L2_basket': - params['evprox1_ampa_L2_basket'], - 'L2_pyramidal': - params['evprox1_ampa_L2_pyramidal'], - 'L5_basket': - params['evprox1_ampa_L5_basket'], - 'L5_pyramidal': - params['evprox1_ampa_L5_pyramidal']} + weights_ampa_p1 = {'L2_basket': 0.2913, 'L2_pyramidal': 0.9337, + 'L5_basket': 0.1951, 'L5_pyramidal': 0.3602} + weights_nmda_p1 = {'L2_basket': 0.9240, 'L2_pyramidal': 0.0845, + 'L5_basket': 0.5849, 'L5_pyramidal': 0.65105} synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, 'L5_basket': 1., 'L5_pyramidal': 1.} - net.add_evoked_drive('evprox1', - mu=params['evprox1_mu'], - sigma=params['evprox1_sigma'], + mu=5.6813, + sigma=20.3969, numspikes=1, location='proximal', weights_ampa=weights_ampa_p1, + weights_nmda=weights_nmda_p1, synaptic_delays=synaptic_delays_p) # Distal - weights_ampa_d1 = {'L2_basket': 0.006562, 'L2_pyramidal': 7e-06, - 'L5_pyramidal': 0.1423} - weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317, - 'L5_pyramidal': 0.080074} + weights_ampa_d1 = {'L2_basket': 0.8037, 'L2_pyramidal': 0.5738, + 'L5_pyramidal': 0.3626} + weights_nmda_d1 = {'L2_basket': 0.2492, 'L2_pyramidal': 0.6183, + 'L5_pyramidal': 0.1121} synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, 'L5_pyramidal': 0.1} - net.add_evoked_drive('evdist1', - mu=63.53, - sigma=3.85, + mu=58.6539, + sigma=5.5810, numspikes=1, location='distal', weights_ampa=weights_ampa_d1, @@ -91,15 +146,29 @@ def set_params(net, params): synaptic_delays=synaptic_delays_d1) # Proximal 2 - weights_ampa_p2 = {'L2_basket': 3e-06, 'L2_pyramidal': 1.43884, - 'L5_basket': 0.008958, 'L5_pyramidal': 0.684013} - + weights_ampa_p2 = {'L2_basket': + params['evprox2_ampa_L2_basket'], + 'L2_pyramidal': + params['evprox2_ampa_L2_pyramidal'], + 'L5_basket': + params['evprox2_ampa_L5_basket'], + 'L5_pyramidal': + params['evprox2_ampa_L5_pyramidal']} + weights_nmda_p2 = {'L2_basket': + params['evprox2_nmda_L2_basket'], + 'L2_pyramidal': + params['evprox2_nmda_L2_pyramidal'], + 'L5_basket': + params['evprox2_nmda_L5_basket'], + 'L5_pyramidal': + params['evprox2_nmda_L5_pyramidal']} net.add_evoked_drive('evprox2', - mu=137.12, - sigma=8.33, + mu=params['evprox2_mu'], + sigma=params['evprox2_sigma'], numspikes=1, location='proximal', weights_ampa=weights_ampa_p2, + weights_nmda=weights_nmda_p2, synaptic_delays=synaptic_delays_p) ############################################################################### @@ -112,27 +181,26 @@ def set_params(net, params): # The following synaptic weight parameter ranges (units of micro-siemens) # were chosen so as to keep the model in physiologically realistic regimes. - -constraints = dict({'evprox1_ampa_L2_basket': (0.01, 1.), - 'evprox1_ampa_L2_pyramidal': (0.01, 1.), - 'evprox1_ampa_L5_basket': (0.01, 1.), - 'evprox1_ampa_L5_pyramidal': (0.01, 1.), - 'evprox1_mu': (5., 50.), - 'evprox1_sigma': (2., 25.)}) +constraints = dict({'evprox2_ampa_L2_basket': (0.01, 1.), + 'evprox2_ampa_L2_pyramidal': (0.01, 1.), + 'evprox2_ampa_L5_basket': (0.01, 1.), + 'evprox2_ampa_L5_pyramidal': (0.01, 1.), + 'evprox2_nmda_L2_basket': (0.01, 1.), + 'evprox2_nmda_L2_pyramidal': (0.01, 1.), + 'evprox2_nmda_L5_basket': (0.01, 1.), + 'evprox2_nmda_L5_pyramidal': (0.01, 1.), + 'evprox2_mu': (100., 120.), + 'evprox2_sigma': (2., 30.)}) ############################################################################### # Now we define and fit the optimizer. 
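 # The ``Optimizer`` draws candidate parameter sets from within the constraint
 # ranges, simulates each candidate, and scores it against the experimental
 # dipole with an RMSE objective; fitting stops after ``max_iter`` evaluations.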
from hnn_core.optimization import Optimizer -tstop = exp_dpl.times[-1] -scale_factor = 3000 -smooth_window_len = 30 - net = jones_2009_model() optim = Optimizer(net, tstop=tstop, constraints=constraints, set_params=set_params, scale_factor=scale_factor, - smooth_window_len=smooth_window_len, solver='cobyla') + smooth_window_len=smooth_window_len, solver='bayesian') with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): optim.fit(exp_dpl.data['agg']) @@ -147,9 +215,11 @@ def set_params(net, params): fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6)) +# plot original exp_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:blue') +init_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:orange') opt_dpl.plot(ax=axes[0], layer='agg', show=False, color='tab:green') -axes[0].legend(['experimental', 'optimized']) +axes[0].legend(['experimental', 'initial', 'optimized']) optim.net_.cell_response.plot_spikes_hist(ax=axes[1]) fig1 = optim.plot_convergence() diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py index 1b8fe5550..c61dbacd8 100644 --- a/hnn_core/externals/bayesopt.py +++ b/hnn_core/externals/bayesopt.py @@ -16,6 +16,7 @@ import numpy as np import scipy.stats as st +from scipy.optimize import basinhopping def expected_improvement(gp, best_f, all_x): @@ -95,8 +96,7 @@ def bayes_opt(func, x0, cons, acquisition, maxfun=200, high=[idx[1] for idx in cons], size=(10000, len(cons))) - # (n_samples, n_features) - gp.fit(np.array(X), np.array(y)) + gp.fit(np.array(X), np.array(y)) # (n_samples, n_features) # get new set of params new_x = all_x[acquisition(gp, best_f, all_x).argmin()] # lowest obj From e97b3e9229f9ff3853b65f4c856d3388b82844fd Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 17:25:51 -0400 Subject: [PATCH 53/63] update example, use dipole.py _rmse --- examples/howto/optimize_evoked.py | 17 +++-------------- hnn_core/optimization/general_optimization.py | 12 ++++++------ hnn_core/optimization/objective_functions.py | 16 ++++------------ hnn_core/tests/test_general_optimization.py | 2 +- 4 files changed, 14 insertions(+), 33 deletions(-) diff --git a/examples/howto/optimize_evoked.py b/examples/howto/optimize_evoked.py index 38e35d1df..94097b861 100644 --- a/examples/howto/optimize_evoked.py +++ b/examples/howto/optimize_evoked.py @@ -114,12 +114,6 @@ def set_params(net, params): # Proximal 1 - weights_ampa_p1 = {'L2_basket': 0.2913, 'L2_pyramidal': 0.9337, - 'L5_basket': 0.1951, 'L5_pyramidal': 0.3602} - weights_nmda_p1 = {'L2_basket': 0.9240, 'L2_pyramidal': 0.0845, - 'L5_basket': 0.5849, 'L5_pyramidal': 0.65105} - synaptic_delays_p = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, - 'L5_basket': 1., 'L5_pyramidal': 1.} net.add_evoked_drive('evprox1', mu=5.6813, sigma=20.3969, @@ -130,12 +124,6 @@ def set_params(net, params): synaptic_delays=synaptic_delays_p) # Distal - weights_ampa_d1 = {'L2_basket': 0.8037, 'L2_pyramidal': 0.5738, - 'L5_pyramidal': 0.3626} - weights_nmda_d1 = {'L2_basket': 0.2492, 'L2_pyramidal': 0.6183, - 'L5_pyramidal': 0.1121} - synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1, - 'L5_pyramidal': 0.1} net.add_evoked_drive('evdist1', mu=58.6539, sigma=5.5810, @@ -181,6 +169,7 @@ def set_params(net, params): # The following synaptic weight parameter ranges (units of micro-siemens) # were chosen so as to keep the model in physiologically realistic regimes. 
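 # Drive timing parameters (``mu`` and ``sigma``) are given in milliseconds.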
+ constraints = dict({'evprox2_ampa_L2_basket': (0.01, 1.), 'evprox2_ampa_L2_pyramidal': (0.01, 1.), 'evprox2_ampa_L5_basket': (0.01, 1.), @@ -200,9 +189,9 @@ def set_params(net, params): net = jones_2009_model() optim = Optimizer(net, tstop=tstop, constraints=constraints, set_params=set_params, scale_factor=scale_factor, - smooth_window_len=smooth_window_len, solver='bayesian') + smooth_window_len=smooth_window_len, max_iter=40) with MPIBackend(n_procs=n_procs, mpi_cmd='mpiexec'): - optim.fit(exp_dpl.data['agg']) + optim.fit(exp_dpl) ############################################################################### # Finally, we can plot the experimental data alongside the post-optimization diff --git a/hnn_core/optimization/general_optimization.py b/hnn_core/optimization/general_optimization.py index def8279af..d060e1b02 100644 --- a/hnn_core/optimization/general_optimization.py +++ b/hnn_core/optimization/general_optimization.py @@ -111,8 +111,8 @@ def fit(self, target): Parameters ---------- - target : ndarray - The recorded dipole. + target : instance of Dipole + A dipole object with experimental data. """ constraints = self._assemble_constraints(self.constraints) @@ -282,8 +282,8 @@ def _run_opt_bayesian(initial_net, tstop, constraints, set_params, obj_fun, Keys are parameter names, values are initial parameters. max_iter : int Number of calls the optimizer makes. - target : ndarray - The recorded dipole. + target : instance of Dipole + A dipole object with experimental data. scale_factor : float The dipole scale factor. smooth_window_len : float @@ -356,8 +356,8 @@ def _run_opt_cobyla(initial_net, tstop, constraints, set_params, obj_fun, Keys are parameter names, values are initial parameters. max_iter : int Number of calls the optimizer makes. - target : ndarray - The recorded dipole. + target : instance of Dipole + A dipole object with experimental data. scale_factor : float The dipole scale factor. smooth_window_len : float diff --git a/hnn_core/optimization/objective_functions.py b/hnn_core/optimization/objective_functions.py index ee8ae1ef0..1250e6e0b 100644 --- a/hnn_core/optimization/objective_functions.py +++ b/hnn_core/optimization/objective_functions.py @@ -10,6 +10,7 @@ from scipy.signal import resample from hnn_core import simulate_dipole +from ..dipole import _rmse def _rmse_evoked(initial_net, initial_params, set_params, predicted_params, @@ -35,8 +36,8 @@ def _rmse_evoked(initial_net, initial_params, set_params, predicted_params, The smooth window length. tstop : float The simulated dipole's duration. - target : ndarray - The recorded dipole. + target : instance of Dipole + A dipole object with experimental data. 
Returns ------- @@ -56,16 +57,7 @@ def _rmse_evoked(initial_net, initial_params, set_params, predicted_params, if smooth_window_len is not None: dpl.smooth(smooth_window_len) - # downsample if necessary - if (len(dpl.data['agg']) < len(target)): - target = resample(target, len(dpl.data['agg'])) - elif (len(dpl.data['agg']) > len(target)): - dpl.data['agg'] = resample(dpl.data['agg'], len(target)) - - # calculate rmse - obj = np.sqrt(((dpl.data['agg'] - target)**2).sum() / - len(dpl.times)) / (max(target) - - min(target)) + obj = _rmse(dpl, target, tstop=tstop) obj_values.append(obj) diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py index da318927d..b77c71923 100644 --- a/hnn_core/tests/test_general_optimization.py +++ b/hnn_core/tests/test_general_optimization.py @@ -88,7 +88,7 @@ def set_params(net_offset, params): # test repr before fitting assert 'fit=False' in repr(optim), "optimizer is already fit" - optim.fit(dpl_orig.data['agg']) + optim.fit(dpl_orig) # test repr after fitting assert 'fit=True' in repr(optim), "optimizer was not fit" From ecdd6f5a7ea644902b7cc5263b53a43b01d952a7 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 17:28:08 -0400 Subject: [PATCH 54/63] update bayesopt.py --- hnn_core/externals/bayesopt.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py index c61dbacd8..4fe60b93c 100644 --- a/hnn_core/externals/bayesopt.py +++ b/hnn_core/externals/bayesopt.py @@ -114,15 +114,6 @@ def bayes_opt(func, x0, cons, acquisition, maxfun=200, if debug: print("iter", i, "best_x", best_x, best_f) - if debug: - import matplotlib.pyplot as plt - scale = 1e6 - sort_idx = np.argsort(X) - plt.plot(np.array(X)[sort_idx] * scale, - np.array(y)[sort_idx] * scale, 'bo-') - plt.axvline(best_x * scale, linestyle='--') - plt.show() - return best_x, best_f From 9f926853500e347006023a92e21ac7beed362ea3 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 20:47:42 -0400 Subject: [PATCH 55/63] update README --- README.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.rst b/README.rst index 401ca0ee6..f7c8ce8e2 100644 --- a/README.rst +++ b/README.rst @@ -28,6 +28,11 @@ GUI * ipywidgets (<=7.7.1) * voila (<=0.3.6) +Optimization +~~~~~~~~~~~~ + +* scikit-learn + Parallel processing ~~~~~~~~~~~~~~~~~~~ From fa029576997286235b2118c68080d405f20c9055 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 21:00:32 -0400 Subject: [PATCH 56/63] remove basinhopping import in bayesopt.py --- hnn_core/externals/bayesopt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py index 4fe60b93c..bc3bee0ff 100644 --- a/hnn_core/externals/bayesopt.py +++ b/hnn_core/externals/bayesopt.py @@ -16,7 +16,6 @@ import numpy as np import scipy.stats as st -from scipy.optimize import basinhopping def expected_improvement(gp, best_f, all_x): From 7df94ea210aa17aa8dce2a52bbe44305e7cad6a8 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 21:19:24 -0400 Subject: [PATCH 57/63] fix flake8 errors --- hnn_core/externals/bayesopt.py | 1 - hnn_core/optimization/objective_functions.py | 4 ---- hnn_core/tests/test_general_optimization.py | 12 ++++++------ 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/hnn_core/externals/bayesopt.py b/hnn_core/externals/bayesopt.py index bc3bee0ff..a55777e5e 100644 --- a/hnn_core/externals/bayesopt.py +++ 
b/hnn_core/externals/bayesopt.py @@ -117,7 +117,6 @@ def bayes_opt(func, x0, cons, acquisition, maxfun=200, if __name__ == '__main__': - import numpy as np from scipy.optimize import rosen opt_params, obj_vals = bayes_opt(rosen, diff --git a/hnn_core/optimization/objective_functions.py b/hnn_core/optimization/objective_functions.py index 1250e6e0b..743cd7c38 100644 --- a/hnn_core/optimization/objective_functions.py +++ b/hnn_core/optimization/objective_functions.py @@ -5,10 +5,6 @@ # Ryan Thorpe # Mainak Jas -import numpy as np - -from scipy.signal import resample - from hnn_core import simulate_dipole from ..dipole import _rmse diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py index b77c71923..ce2ff8888 100644 --- a/hnn_core/tests/test_general_optimization.py +++ b/hnn_core/tests/test_general_optimization.py @@ -95,14 +95,14 @@ def set_params(net_offset, params): # the optimized parameter is in the range for param_idx, param in enumerate(optim.opt_params_): - assert (list(constraints.values())[param_idx][0] - <= param - <= list(constraints.values())[param_idx][1], - "Optimized parameter is not in user-defined range") + assert list(constraints.values())[param_idx][0] \ + <= param \ + <= list(constraints.values())[param_idx][1], \ + "Optimized parameter is not in user-defined range" obj = optim.obj_ # the number of returned rmse values should be the same as max_iter - assert (len(obj) <= max_iter, - "Number of rmse values should be the same as max_iter") + assert len(obj) <= max_iter, \ + "Number of rmse values should be the same as max_iter" # the returned rmse values should be positive assert all(vals >= 0 for vals in obj), "rmse values should be positive" From 4a56279160e43ba6b7cae3fff2de7995b9f40bbc Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Wed, 30 Aug 2023 21:37:18 -0400 Subject: [PATCH 58/63] fix PEP formatting in test_general_optimization.py --- hnn_core/tests/test_general_optimization.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/hnn_core/tests/test_general_optimization.py b/hnn_core/tests/test_general_optimization.py index ce2ff8888..cf697d911 100644 --- a/hnn_core/tests/test_general_optimization.py +++ b/hnn_core/tests/test_general_optimization.py @@ -95,14 +95,13 @@ def set_params(net_offset, params): # the optimized parameter is in the range for param_idx, param in enumerate(optim.opt_params_): - assert list(constraints.values())[param_idx][0] \ - <= param \ - <= list(constraints.values())[param_idx][1], \ - "Optimized parameter is not in user-defined range" + assert (list(constraints.values())[param_idx][0] <= param <= + list(constraints.values())[param_idx][1]), ( + "Optimized parameter is not in user-defined range") obj = optim.obj_ # the number of returned rmse values should be the same as max_iter - assert len(obj) <= max_iter, \ - "Number of rmse values should be the same as max_iter" + assert (len(obj) <= max_iter), ( + "Number of rmse values should be the same as max_iter") # the returned rmse values should be positive assert all(vals >= 0 for vals in obj), "rmse values should be positive" From 07f8a72ac661fd538faca75f08db910f7dcf2fcb Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Sat, 2 Sep 2023 15:57:55 -0400 Subject: [PATCH 59/63] Update unit_tests.yml --- .github/workflows/unit_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml 
b/.github/workflows/unit_tests.yml index 158dc7023..790220b56 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -43,7 +43,7 @@ jobs: - name: Install HNN-core shell: bash -el {0} run: | - pip install --verbose '.[gui]' + pip install --verbose '.[gui,opt]' - name: Lint with flake8 shell: bash -el {0} run: | From ec2391c8f2d9d7c5a7bccc8292a6beb1165dbca8 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez <51917715+carolinafernandezp@users.noreply.github.com> Date: Sat, 2 Sep 2023 15:58:08 -0400 Subject: [PATCH 60/63] Update windows_unit_tests.yml --- .github/workflows/windows_unit_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/windows_unit_tests.yml b/.github/workflows/windows_unit_tests.yml index 64a870bed..bde71c534 100644 --- a/.github/workflows/windows_unit_tests.yml +++ b/.github/workflows/windows_unit_tests.yml @@ -40,7 +40,7 @@ jobs: python -m pip install --upgrade pip pip install flake8 pytest pytest-cov pip install psutil joblib - pip install --verbose .[gui] + pip install --verbose .[gui,opt] - name: Test with pytest shell: cmd run: | From bb43e692f3b1d683ba1e287783256a193eb21df2 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 7 Sep 2023 14:36:26 -0400 Subject: [PATCH 61/63] add func to _run_optimization --- hnn_core/optimization/optimize_evoked.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hnn_core/optimization/optimize_evoked.py b/hnn_core/optimization/optimize_evoked.py index 13f8f07e4..68d973a1c 100644 --- a/hnn_core/optimization/optimize_evoked.py +++ b/hnn_core/optimization/optimize_evoked.py @@ -371,7 +371,7 @@ def _run_optimization(maxiter, param_ranges, optrun): lambda x, idx=idx: param_ranges[param_name]['maxval'] - x[idx]) cons.append( lambda x, idx=idx: x[idx] - param_ranges[param_name]['minval']) - result = fmin_cobyla(optrun, cons=cons, rhobeg=0.1, rhoend=1e-4, + result = fmin_cobyla(func=optrun, cons=cons, rhobeg=0.1, rhoend=1e-4, x0=x0, maxfun=maxiter, catol=0.0) return result From 3b4ec365ccf430c6f5372a5cb375d8df77f5fa7e Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 7 Sep 2023 14:33:28 -0400 Subject: [PATCH 62/63] update optimize_evoked.py --- hnn_core/optimization/optimize_evoked.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hnn_core/optimization/optimize_evoked.py b/hnn_core/optimization/optimize_evoked.py index 68d973a1c..672cff1c0 100644 --- a/hnn_core/optimization/optimize_evoked.py +++ b/hnn_core/optimization/optimize_evoked.py @@ -279,8 +279,6 @@ def _optrun(drive_params_updated, drive_params_static, net, tstop, dt, ------- avg_rmse: float Weighted RMSE between data in dpl and exp_dpl - avg_rmse_unweighted : float - Unweighted RMSE between data in dpl and exp_dpl """ print("Optimization step %d, iteration %d" % (opt_params['cur_step'] + 1, opt_params['optiter'] + 1)) @@ -358,7 +356,7 @@ def _optrun(drive_params_updated, drive_params_static, net, tstop, dt, opt_params['optiter'] += 1 - return avg_rmse, avg_rmse_unweighted # nlopt expects error + return avg_rmse def _run_optimization(maxiter, param_ranges, optrun): From 95df1842c5680b5c72ff492e4950b995d57e44e6 Mon Sep 17 00:00:00 2001 From: Carolina Fernandez Date: Thu, 7 Sep 2023 15:05:00 -0400 Subject: [PATCH 63/63] update README instructions --- README.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.rst b/README.rst index f7c8ce8e2..c0ee2d54b 100644 --- a/README.rst +++ b/README.rst @@ -66,6 +66,13 @@ To check if everything worked 
fine, you can do::

 and it should not give any error messages.
 
+**Installing optimization dependencies**
+
+If you are using Bayesian optimization, scikit-learn is required. Install
+hnn-core with scikit-learn using the following command::
+
+   $ pip install hnn_core[opt]
+
 **GUI installation**
 
 To install the GUI dependencies along with ``hnn-core``, a simple tweak to the above command is needed::
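
For reference, the acquisition step that ``bayes_opt`` repeats on every
iteration can be exercised on its own. The sketch below is illustrative only:
the toy objective, the sample sizes, and the seeds are assumptions rather than
anything in the patches above, but the acquisition formula and the ``argmin``
selection mirror ``expected_improvement`` and ``bayes_opt`` in
``hnn_core/externals/bayesopt.py``, using the scikit-learn dependency added by
this series:

import numpy as np
import scipy.stats as st
from sklearn import gaussian_process

rng = np.random.default_rng(0)

# a handful of observations of a 1-D toy objective (minimum near x = 0.3)
X = rng.uniform(-1., 1., size=(8, 1))
y = (X[:, 0] - 0.3) ** 2

# surrogate model fit to the observations collected so far
gp = gaussian_process.GaussianProcessRegressor(random_state=0)
gp.fit(X, y)
best_f = y.min()

# candidates drawn uniformly within the constraints, as in bayes_opt
all_x = rng.uniform(-1., 1., size=(10000, 1))
y_pred, y_std = gp.predict(all_x, return_std=True)

# acquisition values, matching expected_improvement():
# (y - best_f) * Phi(Z) + y_std * phi(Z), with Z = (y - best_f) / y_std
Z = (y_pred - best_f) / (y_std + 1e-12)
ei = (y_pred - best_f) * st.norm.cdf(Z) + y_std * st.norm.pdf(Z)

# the next query point is the candidate with the lowest acquisition value
new_x = all_x[ei.argmin()].tolist()
print(new_x)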
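
End to end, the workflow this series converges on looks roughly like the
following. This is a minimal sketch patterned on
``hnn_core/tests/test_general_optimization.py`` above: the reduced 3x3
network, the drive values, and ``max_iter=11`` are illustrative choices that
keep the run short, not recommendations, while the ``Optimizer`` signature and
attributes are the ones added by these patches:

import os.path as op

import hnn_core
from hnn_core import jones_2009_model, read_params, simulate_dipole
from hnn_core.optimization import Optimizer

# reduced 3x3 network so that the example runs quickly
hnn_core_root = op.dirname(hnn_core.__file__)
base_params = read_params(op.join(hnn_core_root, 'param', 'default.json'))
base_params.update({'N_pyr_x': 3, 'N_pyr_y': 3})

tstop = 100.
weights_ampa = {'L2_basket': 0.5, 'L2_pyramidal': 0.5,
                'L5_basket': 0.5, 'L5_pyramidal': 0.5}
synaptic_delays = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
                   'L5_basket': 1., 'L5_pyramidal': 1.}

# ground-truth dipole from a known drive; in practice this would be an
# experimental recording loaded with read_dipole
net_orig = jones_2009_model(base_params)
net_orig.add_evoked_drive('evprox', mu=2., sigma=1., numspikes=1,
                          location='proximal', weights_ampa=weights_ampa,
                          synaptic_delays=synaptic_delays)
dpl_orig = simulate_dipole(net_orig, tstop=tstop, n_trials=1)[0]


def set_params(net, params):
    # map the candidate parameter dict onto drives of a drive-free network
    net.add_evoked_drive('evprox', mu=params['mu'], sigma=params['sigma'],
                         numspikes=1, location='proximal',
                         weights_ampa=weights_ampa,
                         synaptic_delays=synaptic_delays)


# box constraints per parameter name
constraints = {'mu': (1, 6), 'sigma': (1, 3)}

net_offset = jones_2009_model(base_params)  # must carry no external drives
optim = Optimizer(net_offset, tstop=tstop, constraints=constraints,
                  set_params=set_params, solver='bayesian',
                  obj_fun='dipole_rmse', max_iter=11)
optim.fit(dpl_orig)  # after PATCH 53, fit() takes a Dipole instance

print('optimized parameters:', optim.opt_params_)
print('best RMSE:', min(optim.obj_))
fig = optim.plot_convergence()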