Commit 2d3c4ca

first commit (0 parents)

36 files changed: +2046, -0 lines changed

Code/Convensembles/__init__.py

Lines changed: 176 additions & 0 deletions
from .spikes import SpikeLeanerPursuit
from .ensembles import EnsembleLearnerLASSO

import sys, os
import h5py as h5
import numpy as np


def rewrite(lines):
    # Overwrite the last `lines` lines of terminal output: move the cursor
    # up with the ANSI escape "\x1b[A", blank each line, and move the
    # cursor back up so the next prints replace the old progress text.
    for _ in range(lines):
        sys.stdout.write("\x1b[A")
    for _ in range(lines):
        sys.stdout.write(" \n")
    for _ in range(lines):
        sys.stdout.write("\x1b[A")
def convolution(dataset, folder, dataset_name, file_name, swap_axes,
                n_ensembles, iterations, lag, ensemble_penalty,
                limit, start, remove, initializations, store_iterations,
                warm_start_file, warm_start_dataset):
    r'''
    This function solves the optimization problem

    :math:`\min_{\mathbf{a}, \mathbf{s}} \left\| \mathbf{Y} - \sum_{i=1}^{l} \mathbf{s}_i \circledast \mathbf{a}_i \right\|_F^2 + \alpha \sum_{i=1}^{l} \|\mathbf{s}_i\|_0 + \beta \sum_{i=1}^{l} \|\mathbf{a}_i\|_1`

    by using a block coordinate descent strategy.

    Parameters
    ----------

    dataset : path of the original *.h5 file, given without the .h5 extension

    folder : name of the output folder; it is created in the directory where
        the dataset is located

    dataset_name : name of the dataset (sheet) inside the *.h5 file that
        holds the spike matrix

    file_name : base name of the output *.h5 files, one per initialization

    swap_axes : if True, the input matrix from the *.h5 file is transposed

    n_ensembles : maximum number of ensembles to be found

    iterations : number of iterations in each initialization

    lag : maximum length of an ensemble

    ensemble_penalty : ensemble coefficient beta; the larger this value, the
        sparser the motifs will be

    limit : frame number up to which the analysis is performed, useful if
        only part of the data should be analyzed

    start : frame number from which the analysis is started, useful if only
        part of the data should be analyzed

    remove : comma-separated string of neuron indices whose rows are zeroed
        out in the spike matrix

    initializations : number of random initializations; for each trial the
        same set of parameters is used

    store_iterations : if set, the result of each iteration is stored

    warm_start_file and warm_start_dataset : name of the *.h5 file and
        dataset that contain values for the ensembles and spikes that should
        be used for initialization

    Output
    ------

    learned ensembles and spikes saved in an *.h5 file
    '''
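    # Block coordinate descent: starting from random (or warm-start)
    # activations, the main loop below alternates between updating the
    # ensembles a_i with the activations held fixed (LASSO step) and
    # updating the activations s_i with the ensembles held fixed
    # (pursuit step).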
    os.makedirs(folder)

    # load the spike matrix (neurons x frames) from the input file
    fin = h5.File(dataset + '.h5', 'r')
    spikes_mtx = fin[dataset_name][...]
    spikes_mtx = spikes_mtx.astype(float)
    fin.close()

    if swap_axes:
        spikes_mtx = np.swapaxes(spikes_mtx, 0, 1)

    # zero out the rows of the neurons that should be excluded
    if remove is not None:
        remove = [int(i) for i in remove.split(',')]
        for r in remove:
            spikes_mtx[r, :] = 0

    # restrict the analysis to the requested frame range
    spikes_mtx = spikes_mtx[:, start:]
    if limit > 0:
        spikes_mtx = spikes_mtx[:, :limit]

    n_neurons, n_frames = spikes_mtx.shape

    learner_spikes = SpikeLeanerPursuit()
    learner_ensembles = EnsembleLearnerLASSO(ensemble_penalty, n_ensembles,
                                             n_neurons, n_frames, lag)

    spikes = np.zeros((n_ensembles, n_frames))
    ensembles = np.zeros((n_ensembles, n_neurons, lag))

    # write the chosen parameters to a log file
    txt = os.path.join(folder, "log.txt")
    fh = open(txt, "w")
    lines_of_text = ["--- finding ensembles ---\n",
                     "Dataset: " + dataset + ".h5",
                     "Folder: " + folder,
                     "Sheet: " + dataset_name,
                     "H5-file: " + file_name,
                     "Swap-axes: " + str(swap_axes),
                     "Number of ensembles: " + str(n_ensembles),
                     "Number of iterations: " + str(iterations),
                     "Length of ensembles: " + str(lag),
                     "Ensembles-Penalty: " + str(ensemble_penalty),
                     "Limit: " + str(limit),
                     "Start: " + str(start),
                     "Removed neurons: " + str(remove),
                     "Number of initializations: " + str(initializations)]
    fh.write('\n'.join(lines_of_text) + '\n\nReconstruction Errors (initialization, iteration):\n\n')
    fh.close()

    print("\n\nThe data to be analysed consists of %d neurons observed over %d time frames. \nIf this is not correct, use the -swap option to transpose the inserted matrix.\n\n" % (n_neurons, n_frames))
    # the "\b" backspaces over the space that print() inserts before ".h5"
    print("--- finding", n_ensembles, "ensembles with length", lag, "in", file_name, "\b.h5 --- \n(for more information see log.txt) \n\n\n\n\n\n\n")

    for init in range(initializations):
        rewrite(5)

        print("initialization %02d/%02d" % (init + 1, initializations))

        name_of_init = os.path.join(folder, file_name + '_' + str(init))
        fout = h5.File(name_of_init + '.h5', 'w')

        # random binary initialization of the activations
        for i in range(n_ensembles):
            rnd = np.random.randn(n_frames)
            rnd[np.where(rnd < 0)] = 0
            rnd[np.where(rnd > 0)] = 1
            spikes[i, :] = rnd

        # optionally overwrite the random initialization with the result
        # of a previous run
        if warm_start_file:
            print('warm_start', warm_start_file)
            fw = h5.File(warm_start_file + '.h5', 'r')
            grp = fw[warm_start_dataset]
            ensembles = grp["ensembles"][:]
            spikes = grp["activations"][:]
            fw.close()

        print("\n\n\n")
        for i in range(iterations):
            rewrite(4)

            print("  iteration %02d/%02d" % (i + 1, iterations))

            print("  learning ensembles")
            learner_ensembles.set_iter(i)
            ensembles = learner_ensembles.learn(spikes_mtx, spikes)

            print("  learning activations")
            reco, spikes = learner_spikes.learn(spikes_mtx, ensembles)

            fh = open(txt, "a")
            fh.write("RE (" + str(init) + "," + str(i) + "): " + str(reco) + "\n")
            fh.close()

            if reco == np.inf:
                print("ERROR: Reconstruction Error is " + str(reco) + ". Decrease ensemble penalty and try again.")
                sys.exit()

            # optionally store the intermediate result of every iteration
            if store_iterations:
                grp_iter = fout.require_group("iter_%d" % (i + 1))
                grp_iter.create_dataset("activations", data=spikes)
                grp_iter.create_dataset("ensembles", data=ensembles)
                fout.flush()

        # store the final result of this initialization
        resgrp = fout.require_group('/result')
        resgrp.create_dataset("activations", data=spikes)
        resgrp.create_dataset("ensembles", data=ensembles)
        fout.close()

    fh = open(txt, "a")
    fh.write("--- finished --- ")
    fh.close()
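For orientation, here is a minimal sketch of how convolution might be invoked. The import path, file paths, dataset names, and parameter values below are illustrative assumptions, not part of this commit.

# Hypothetical usage sketch; paths, names, and parameter values are
# assumptions chosen for illustration only.
from Convensembles import convolution

convolution(dataset='data/recording',    # reads data/recording.h5
            folder='data/output',        # created by the call; must not exist yet
            dataset_name='spikes',       # matrix inside the .h5 file
            file_name='result',          # outputs result_0.h5, result_1.h5, ...
            swap_axes=False,
            n_ensembles=10,              # max number of motifs
            iterations=20,
            lag=30,                      # max motif length in frames
            ensemble_penalty=0.1,        # beta: larger values give sparser motifs
            limit=0,                     # 0 keeps all frames after `start`
            start=0,
            remove=None,                 # e.g. '1,7,11' zeroes out those neurons
            initializations=5,
            store_iterations=False,
            warm_start_file=None,
            warm_start_dataset=None)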

Code/Convensembles/__init__.pyc (6.06 KB): Binary file not shown.

Three further binary files (5.7 KB, 6.73 KB, 2.28 KB): Binary files not shown.
