Skip to content

Commit

Permalink
Code for paper 3; Q/MI extraction from spike raster data; further sma…
Browse files Browse the repository at this point in the history
…ll additions
  • Loading branch information
jlubo committed Sep 20, 2022
1 parent 159d0cc commit 2823646
Show file tree
Hide file tree
Showing 39 changed files with 1,372 additions and 434 deletions.
2 changes: 2 additions & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
simulation-code/build_scripts*/* linguist-vendored
notebooks/lehr_luboeinski_tetzlaff_2022/code/* linguist-vendored
7 changes: 7 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,9 @@
*.out
*pycache*/
*_TRIPLET*/
*_F*/
*_MINCONV*/
*_MIN2N1S*/
plasticity_type.txt
*Const.*/
simulation-bin/run_binary_misc/connections.txt
43 changes: 34 additions & 9 deletions BIBTEX.md
Original file line number Diff line number Diff line change
@@ -1,23 +1,39 @@
## BibTeX code for publications
## BibTeX code for citing simulation code and related publications

@article{LuboeinskiTetzlaff2021a,
@article{LuboeinskiTetzlaff2021,
title={Memory consolidation and improvement by synaptic tagging and capture in recurrent neural networks},
author={Luboeinski, Jannik and Tetzlaff, Christian},
journal={Communications Biology},
volume={4},
number={275},
year={2021},
publisher={Nature Publishing Group},
doi={10.1038/s42003-021-01778-y}
doi={10.1038/s42003-021-01778-y},
url={https://doi.org/10.1038/s42003-021-01778-y}
}

@article{LuboeinskiTetzlaff2021b,
@article{LuboeinskiTetzlaff2022,
title={Organization and priming of long-term memory representations with two-phase plasticity},
author={Luboeinski, Jannik and Tetzlaff, Christian},
journal={bioRxiv preprint},
year={2021},
publisher={Cold Spring Harbor Laboratory},
doi={10.1101/2021.04.15.439982}
journal={Cognitive Computation},
volume={},
number={},
year={2022},
publisher={Springer},
doi={10.1007/s12559-022-10021-7},
url={https://doi.org/10.1007/s12559-022-10021-7}
}

@article{Lehr2022,
title={Neuromodulator-dependent synaptic tagging and capture retroactively controls neural coding in spiking neural networks},
author={Lehr, Andrew B. and Luboeinski, Jannik and Tetzlaff, Christian},
journal={Scientific Reports (under review)},
volume={},
number={},
year={2022},
publisher={},
doi={},
url={}
}

@phdthesis{Luboeinski2021thesis,
Expand All @@ -26,5 +42,14 @@
year={2021},
school={University of G\"{o}ttingen},
type={Dissertation},
url={http://hdl.handle.net/21.11130/00-1735-0000-0008-58f8-e}
doi={10.53846/goediss-463},
url={https://doi.org/10.53846/goediss-463}
}

@Misc{LuboeinskiLehr2022code,
title={Simulation code and analysis scripts for recurrent spiking neural networks with memory consolidation based on synaptic tagging and capture},
author={Luboeinski, Jannik and Lehr, Andrew B.},
year={2022},
doi={10.5281/zenodo.4429195},
url={https://doi.org/10.5281/zenodo.4429195}
}
91 changes: 62 additions & 29 deletions README.md

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions analysis/assemblyAttractorStatistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,8 +265,8 @@ def spikeRasterPlot(timestamp, data_dir, output_dir, new_plots):
fout = open("spike_raster.gpl", "w")
fout.write("set term png enhanced font Sans 20 size 1280,960 lw 2.5\n")
fout.write("set output '" + plot_file1 + "'\n\n")
fout.write("Ne = 2500\n")
fout.write("Ni = 625\n")
fout.write("Ne = " + str(exc_pop_size) + "\n")
fout.write("Ni = " + str(inh_pop_size) + "\n")
fout.write("set xlabel 'Time (s)'\n")
fout.write("unset ylabel\n")
fout.write("set yrange [0:1]\n")
Expand Down
4 changes: 2 additions & 2 deletions analysis/assemblyAvalancheStatistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,8 +297,8 @@ def spikeRasterPlot(timestamp, data_dir, output_dir, new_plots):
fout = open("spike_raster.gpl", "w")
fout.write("set term png enhanced font Sans 20 size 1280,960 lw 2.5\n")
fout.write("set output '" + plot_file1 + "'\n\n")
fout.write("Ne = 2500\n")
fout.write("Ni = 625\n")
fout.write("Ne = " + str(exc_pop_size) + "\n")
fout.write("Ni = " + str(inh_pop_size) + "\n")
fout.write("set xlabel 'Time (s)'\n")
fout.write("unset ylabel\n")
fout.write("set yrange [0:1]\n")
Expand Down
69 changes: 36 additions & 33 deletions analysis/calculateMIa.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,40 @@
np.set_printoptions(threshold=1e10, linewidth=200) # extend console print range for numpy arrays

# calculateMIa
# Calculates mutual information of the activity distribution of the network at two timesteps and returns it along with the self-information of
# the reference distribution
# Calculates the mutual information and the self-information of the reference distribution from given firing rate distributions (reference/learning, and recall)
# v_ref: the reference activity distribution (or the activity distribution during learning)
# v_recall: the activity distribution during recall
# return: mutual information between reference/learning distribution and recall distribution, and self-information of reference distribution and recall distribution
def calculateMIa(v_ref, v_recall):
    """Compute the mutual information between a reference firing rate
    distribution and a recall firing rate distribution.

    Parameters:
        v_ref: the reference activity distribution (e.g., the activity
            distribution during learning)
        v_recall: the activity distribution during recall

    Returns:
        Tuple of (mutual information between the two distributions,
        marginal entropy of the reference distribution, marginal entropy
        of the recall distribution).
        NOTE(review): some call sites appear to unpack only two values
        from this three-tuple — confirm callers expect three.
    """
    entropy_ref = marginalEntropy(v_ref)
    print("margEntropyActL = " + str(entropy_ref))

    entropy_recall = marginalEntropy(v_recall)
    print("margEntropyActR = " + str(entropy_recall))

    entropy_joint = jointEntropy(v_ref, v_recall)
    print("jointEntropyAct = " + str(entropy_joint))

    mi = mutualInformation(entropy_ref, entropy_recall, entropy_joint)
    print("MIa = " + str(mi))

    return mi, entropy_ref, entropy_recall

# calculateMIaFromFile
# Calculates mutual information of the activity distribution of the network at two timesteps, and the self-information of
# the reference distribution; reads recall distribution from file; reads reference distribution either from file or uses a model
# nppath: path to the network_plots directory to read the data from
# timestamp: a string containing date and time (to access correct paths)
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_activity: the time at which the activities shall be read out (some time during recall)
# time_ref: the reference time (for getting the activity distribution during learning)
# core: the neurons in the cell assembly (for stipulation; only required if no activity distribution during learning is available)
def calculateMIa(nppath, timestamp, Nl_exc, time_for_activity, time_ref = "11.0", core = np.array([])):

if time_ref: # use reference firing rate distribution from data (for learned cell assembly)
# time_for_activity [optional]: the time at which the activities shall be read out (some time during recall)
# time_ref [optional]: the reference time (for getting the activity distribution during learning)
# core [optional]: the neurons in the cell assembly (for stipulation; only required if no activity distribution during learning is available)
# return: mutual information between reference/learning distribution and recall distribution, and self-information of reference distribution
def calculateMIaFromFile(nppath, timestamp, Nl_exc, time_for_activity = "20.1", time_ref = "11.0", core = np.array([])):

### Get data ###
if time_ref: # if provided, use reference firing rate distribution from file (for learned cell assembly)
times_for_readout_list = [time_ref, time_for_activity] # the simulation times at which the activities shall be read out
print("Using reference distribution at " + time_ref + "...")
else: # use model firing rate distribution (for stipulated cell assembly)
Expand All @@ -40,8 +63,6 @@ def calculateMIa(nppath, timestamp, Nl_exc, time_for_activity, time_ref = "11.0"
z = [np.zeros((Nl_exc**2,Nl_exc**2)) for x in times_for_readout_list]
v = [np.zeros((Nl_exc,Nl_exc)) for x in times_for_readout_list]

v_array = np.zeros(Nl_exc*Nl_exc) # data array

rawpaths = Path(nppath)

for i in range(len(times_for_readout_list)):
Expand All @@ -68,32 +89,14 @@ def calculateMIa(nppath, timestamp, Nl_exc, time_for_activity, time_ref = "11.0"
except OSError:
raise

### Activity ###
if time_ref: # use reference firing rate distribution from data (for learned cell assembly)
margEntropyActL = marginalEntropy(v[0])
print("margEntropyActL = " + str(margEntropyActL))

margEntropyActR = marginalEntropy(v[1])
print("margEntropyActR = " + str(margEntropyActR))

jointEntropyAct = jointEntropy(v[0],v[1])
print("jointEntropyAct = " + str(jointEntropyAct))
### Get results ###
if time_ref: # if provided, use reference firing rate distribution from file (for learned cell assembly)
MIa, self_MIa_ref = calculateMIa(v[0], v[1])

else: # use model firing rate distribution (for stipulated cell assembly)
margEntropyActL = marginalEntropy(v_model)
print("margEntropyActL = " + str(margEntropyActL))

margEntropyActR = marginalEntropy(v[0])
print("margEntropyActR = " + str(margEntropyActR))

jointEntropyAct = jointEntropy(v_model,v[0])
print("jointEntropyAct = " + str(jointEntropyAct))

### Results and Output ###
MIa = mutualInformation(margEntropyActL, margEntropyActR, jointEntropyAct)
print("MIa = " + str(MIa))
MIa, self_MIa_ref = calculateMIa(v_model, v[0])

return MIa, margEntropyActL
return MIa, self_MIa_ref

# marginalEntropy
# Computes the marginal entropy of an array
Expand Down
50 changes: 39 additions & 11 deletions analysis/calculateQ.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,21 +14,52 @@
np.set_printoptions(threshold=1e10, linewidth=200) # extend console print range for numpy arrays

# calculateQ
# Calculates Q measure for how well recall works
# From given mean firing rates of subpopulations, calculates the Q value
# that measures how well recall of an input-defined pattern works
# v_as: mean firing rate of the neuronal subpopulation that receives learning and recall stimulation
# v_as_err: uncertainty of v_as
# v_ans: mean firing rate of the neuronal subpopulation that receives learning but no recall stimulation
# v_ans_err: uncertainty of v_ans
# v_ctrl: mean firing rate of the neuronal subpopulation that receives neither learning nor recall stimulation
# v_ctrl_err: uncertainty of v_ctrl
def calculateQ(v_as, v_as_err, v_ans, v_ans_err, v_ctrl, v_ctrl_err):
    """Compute the recall quality measure Q with propagated uncertainty.

    Q = (v_ans - v_ctrl) / v_as quantifies how well recall of an
    input-defined pattern works: how strongly the non-stimulated part of
    the learned assembly is reactivated, relative to the stimulated part
    and corrected for background activity.

    Parameters:
        v_as: mean firing rate of the subpopulation receiving learning and recall stimulation
        v_as_err: uncertainty of v_as
        v_ans: mean firing rate of the subpopulation receiving learning but no recall stimulation
        v_ans_err: uncertainty of v_ans
        v_ctrl: mean firing rate of the subpopulation receiving neither learning nor recall stimulation
        v_ctrl_err: uncertainty of v_ctrl

    Returns:
        (Q, Q_err): the Q value and its uncertainty, propagated from the
        uncertainties of the three mean rates.
    """
    rate_diff = v_ans - v_ctrl
    Q = rate_diff / v_as

    # Gaussian error propagation over v_as, v_ans, v_ctrl
    term_ans = np.power(v_ans_err / v_as, 2)
    term_ctrl = np.power(v_ctrl_err / v_as, 2)
    term_as = np.power(v_as_err * rate_diff / np.power(v_as, 2), 2)
    Q_err = np.sqrt(term_ans + term_ctrl + term_as)

    return Q, Q_err

# getSubpopulations
# Retrieves the indices of the neurons belonging to the different network subpopulations
# N_pop: the number of neurons in the whole population
# core: array of the neurons belonging to the stimulated core
# p_r: the fraction of core neurons that are stimulated for recall
# return: arrays of neuron indices of network subpopulations that receive 1. learning and recall stimulation, 2. learning but no recall stimulation, 3. neither learning nor recall stimulation
def getSubpopulations(N_pop, core, p_r):
    """Partition a network's neurons into recall-related subpopulations.

    Parameters:
        N_pop: the number of neurons in the whole population
        core: array of the neuron indices belonging to the stimulated core
        p_r: the fraction of core neurons that are stimulated for recall
            (may be passed as a string; it is cast to float)

    Returns:
        (core_recall, core_norecall, control): arrays of neuron indices of
        the subpopulations that receive 1. learning and recall stimulation,
        2. learning but no recall stimulation, 3. neither learning nor
        recall stimulation.
    """
    # the first floor(p_r * |core|) core neurons are the ones stimulated for recall
    num_recall = int(np.floor(float(p_r) * core.shape[0]))
    core_recall = core[0:num_recall]
    # np.isin supersedes np.in1d, which is deprecated and removed in NumPy 2.0
    core_norecall = core[np.logical_not(np.isin(core, core_recall))]
    control = np.delete(np.arange(N_pop), core)

    return core_recall, core_norecall, control

# calculateMeanRatesAndQ
# Calculates the mean firing rates of subpopulations from given firing rate distribution across a network,
# then calculates the Q value that measures how well recall of an input-defined pattern works
# nppath: path to the network_plots directory to read the data from
# timestamp: a string containing date and time (to access correct paths)
# core: array of the neurons belonging to the stimulated core
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_activity: the time that at whcih the activites shall be read out (some time during recall)
# time_for_activity: the time at which the activities shall be read out (some time during recall)
# recall_fraction: the fraction of core neurons that are activated for recall
def calculateQ(nppath, timestamp, core, Nl_exc, time_for_activity, recall_fraction):

core_recall = core[0:int(np.floor(float(recall_fraction)*core.shape[0]))]
core_norecall = core[np.logical_not(np.in1d(core, core_recall))]
control = np.delete(np.arange(Nl_exc*Nl_exc), core)
def calculateMeanRatesAndQ(nppath, timestamp, core, Nl_exc, time_for_activity, recall_fraction):

path = ""

core_recall, core_norecall, control = getSubpopulations(Nl_exc*Nl_exc, core, recall_fraction)

v_array = np.zeros(Nl_exc*Nl_exc) # data array

# look for data file [timestamp]_net_[time_for_activity].txt
Expand Down Expand Up @@ -79,9 +110,6 @@ def calculateQ(nppath, timestamp, core, Nl_exc, time_for_activity, recall_fracti
v_ans_err = np.sqrt(np.sum(np.power(v_array[core_norecall], 2)) / len(core_norecall) - np.power(v_ans, 2))
v_ctrl_err = np.sqrt(np.sum(np.power(v_array[control], 2)) / len(control) - np.power(v_ctrl, 2))

Q = (v_ans - v_ctrl) / v_as

Q_err = np.sqrt( np.power( v_ans_err / v_as, 2 ) + np.power( v_ctrl_err / v_as, 2 ) \
+ np.power (v_as_err * (v_ans - v_ctrl) / np.power(v_as, 2), 2 ) ) # error propagated from v_as, v_ans, v_ctrl
Q, Q_err = calculateQ(v_as, v_as_err, v_ans, v_ans_err, v_ctrl, v_ctrl_err)

return Q, Q_err, v_as, v_as_err, v_ans, v_ans_err, v_ctrl, v_ctrl_err
39 changes: 29 additions & 10 deletions analysis/computeRateFromSpikes.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,16 @@
np.set_printoptions(threshold=1e10, linewidth=200) # extend console print range for numpy arrays

# main parameters
Ne = 2500 # number of neurons in the excitatory population
'''Ne = 2500 # number of neurons in the excitatory population
Ni = 625 # number of neurons in the inhibitory population
Na = 600 # number of neurons in one cell assembly
overlap = 0.1 # relative size of the overlap
period_duration = 0.01 # binning period (in units of seconds)
period_duration = 0.01 # binning period (in units of seconds)'''
Ne = 1600 # number of neurons in the excitatory population
Ni = 400 # number of neurons in the inhibitory population
Na = 150 # number of neurons in one cell assembly
overlap = 0 # relative size of the overlap
period_duration = 0.1 # binning period (in units of seconds)

# derived parameters
Nl = int(round(np.sqrt(Ne))) # the number of excitatory neurons in one line of a quadratic grid
Expand All @@ -31,7 +36,8 @@
# Recursively looks for a data files, extracts spiking data from them and computes mean firing rates
# directory: the directory to look into
# fout: file handle to output file
def extractRecursion(directory, fout):
# col_sep [optional]: characters separating columns in the data file
def extractRecursion(directory, fout, col_sep = '\t\t'):

data_found = False # specifies if any data has been found
rawpaths = Path(directory)
Expand All @@ -46,8 +52,10 @@ def extractRecursion(directory, fout):
full_path = str(x)
(full_path_head, filename) = os.path.split(full_path)

if x.is_file() and "_spike_raster.txt" in filename:
if x.is_file() and ("_spikes.txt" in filename or "_spike_raster.txt" in filename):

if "_spikes.txt" in filename: # Arbor data
col_sep = ' '
data_found = True

print("========================")
Expand All @@ -57,11 +65,20 @@ def extractRecursion(directory, fout):
# read the last line and compute number of periods
try:
with open(full_path, 'rb') as f:
first_line = f.readline().decode()
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n': # seek last line
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode()
num_periods_tot = int(float(last_line.split('\t\t')[0]) / period_duration) + 1
#num_periods_tot = int(float(last_line.split(col_sep)[0]) / period_duration) + 1
t_first = float(first_line.split(col_sep)[0])
t_shift = abs(t_first) if t_first < 0 else 0 # shift if there have been spikes at negative times

if "_spikes.txt" in filename: # Arbor data
num_periods_tot = int(np.ceil((float(last_line.split(col_sep)[0]) + t_shift) / 1000 / period_duration)) + 1
else:
num_periods_tot = int(np.ceil((float(last_line.split(col_sep)[0]) + t_shift) / period_duration)) + 1

except IOError:
print('Error opening "' + filename + '"')
exit()
Expand All @@ -86,10 +103,12 @@ def extractRecursion(directory, fout):

f = open(full_path)
for line in f:
segs = line.split('\t\t')
segs = line.split(col_sep)

if (segs[0] != ""):
t = float(segs[0])
if "_spikes.txt" in filename: # Arbor data
t /= 1000 # convert ms to s
n = int(segs[1])
current_period = int(np.floor(t / period_duration))
tot_spikes[current_period] += 1
Expand Down Expand Up @@ -133,10 +152,10 @@ def extractRecursion(directory, fout):
mean_tot = tot_spikes[i] / N / period_duration

# write time (at 1/2 of a period) and values for this period
fout.write(str(round((i+0.5)*period_duration,4)) + "\t\t" + \
str(mean_A) + "\t\t" + str(mean_I_AB) + "\t\t" + str(mean_B) + "\t\t" + \
str(mean_I_AC) + "\t\t" + str(mean_I_BC) + "\t\t" + str(mean_I_ABC) + "\t\t" + str(mean_C) + "\t\t" + \
str(mean_ctrl) + "\t\t" + str(mean_inh) + "\t\t" + str(mean_tot) + "\n")
fout.write(str(round((i+0.5)*period_duration,4)) + col_sep + \
str(mean_A) + col_sep + str(mean_I_AB) + col_sep + str(mean_B) + col_sep + \
str(mean_I_AC) + col_sep + str(mean_I_BC) + col_sep + str(mean_I_ABC) + col_sep + str(mean_C) + col_sep + \
str(mean_ctrl) + col_sep + str(mean_inh) + col_sep + str(mean_tot) + "\n")


elif x.is_dir():
Expand Down
Loading

0 comments on commit 2823646

Please sign in to comment.