Skip to content

Commit b6e1930

Browse files
test commit
0 parents commit b6e1930

16 files changed

+4182
-0
lines changed
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"cells": [],
3+
"metadata": {},
4+
"nbformat": 4,
5+
"nbformat_minor": 5
6+
}
Lines changed: 227 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,227 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"id": "46a0a71e",
6+
"metadata": {},
7+
"source": [
8+
"# Image Identification using \"pyimagej\""
9+
]
10+
},
11+
{
12+
"cell_type": "code",
13+
"execution_count": 6,
14+
"id": "ca680db4",
15+
"metadata": {},
16+
"outputs": [],
17+
"source": [
18+
"from FIJI_Marco_For_Particle_Identification_V_stable import process_NF_particles as pnp\n",
19+
"import glob\n",
20+
"import multiprocessing as mp\n",
21+
"import numpy as np \n",
22+
"import pandas as pd\n",
23+
"import scipy.io as sio\n",
24+
"import shutil\n",
25+
"import time \n",
26+
"import gc\n",
27+
"import os"
28+
]
29+
},
30+
{
31+
"cell_type": "markdown",
32+
"id": "e56c6e8b",
33+
"metadata": {},
34+
"source": [
35+
"# Legacy sections "
36+
]
37+
},
38+
{
39+
"cell_type": "code",
40+
"execution_count": 7,
41+
"id": "33a87513",
42+
"metadata": {},
43+
"outputs": [],
44+
"source": [
45+
"mainpath = r'D:\\phi_star_measurement\\4.2mm\\7um'\n",
46+
"#experimentPath = glob.glob(os.path.normpath(mainPath)+'\\*_files');\n",
47+
"experimentPath = glob.glob(mainpath+'\\**\\*_files')"
48+
]
49+
},
50+
{
51+
"cell_type": "code",
52+
"execution_count": 8,
53+
"id": "b8266dfa",
54+
"metadata": {},
55+
"outputs": [
56+
{
57+
"data": {
58+
"text/plain": [
59+
"['D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\1445\\\\16h05_fluo_2x2x_180V_2_exp_3db_342fps_files',\n",
60+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\300\\\\16h42_fluo_2x2x_180V_2_exp_3db_342fps_files',\n",
61+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\400\\\\16h42_fluo_2x2x_180V_2_exp_3db_342fps_files',\n",
62+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\570\\\\16h42_fluo_2x2x_180V_2_exp_3db_342fps_files',\n",
63+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\700\\\\16h36_fluo_2x2x_180V_2_exp_0db_342fps_files',\n",
64+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\795\\\\15h54_fluo_2x2x_180V_2_exp_0db_342fps_files',\n",
65+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\875\\\\15h12_fluo_2x2x_180V_2_exp_0db_342fps_files',\n",
66+
" 'D:\\\\phi_star_measurement\\\\4.2mm\\\\7um\\\\975\\\\16h25_fluo_2x2x_180V_2_exp_3db_342fps_files']"
67+
]
68+
},
69+
"execution_count": 8,
70+
"metadata": {},
71+
"output_type": "execute_result"
72+
}
73+
],
74+
"source": [
75+
"experimentPath"
76+
]
77+
},
78+
{
79+
"cell_type": "code",
80+
"execution_count": null,
81+
"id": "19d09fd7",
82+
"metadata": {
83+
"code_folding": []
84+
},
85+
"outputs": [
86+
{
87+
"name": "stdout",
88+
"output_type": "stream",
89+
"text": [
90+
"time taken by D:/phi_star_measurement/4.2mm/7um/1445/16h05_fluo_2x2x_180V_2_exp_3db_342fps_files/Images/ is 35 minutes\n",
91+
"time taken by D:/phi_star_measurement/4.2mm/7um/300/16h42_fluo_2x2x_180V_2_exp_3db_342fps_files/Images/ is 29 minutes\n",
92+
"time taken by D:/phi_star_measurement/4.2mm/7um/400/16h42_fluo_2x2x_180V_2_exp_3db_342fps_files/Images/ is 32 minutes\n",
93+
"time taken by D:/phi_star_measurement/4.2mm/7um/570/16h42_fluo_2x2x_180V_2_exp_3db_342fps_files/Images/ is 27 minutes\n"
94+
]
95+
}
96+
],
97+
"source": [
98+
"# parallelization code set\n",
99+
"# Create the file architecture \n",
100+
"# add the main path \n",
101+
"\n",
102+
"mainpath = r'D:\\phi_star_measurement\\4.2mm\\7um'\n",
103+
"#experimentPath = glob.glob(os.path.normpath(mainPath)+'\\*_files');\n",
104+
"experimentPath = glob.glob(mainpath+'\\**\\*_files')\n",
105+
"\n",
106+
"# add the Images \n",
107+
"experimentPath = [s + '\\\\Images\\\\' for s in experimentPath];\n",
108+
"experimentPath = [s .replace('\\\\','/') for s in experimentPath];\n",
109+
"\n",
110+
"results = [];\n",
111+
"for Path in experimentPath:\n",
112+
" # ~~~~~~~~~~~ Count the total number of frames within the \"Image\" folder ~~~~~~~~~~~~~\n",
113+
" Total_Frames = len(glob.glob(Path +'*.tif'));\n",
114+
" if (os.path.normpath(glob.glob(Path +'*.tif')[-1]).split(os.sep)[-1] == 'ImageBackground.tif'):\n",
115+
" Total_Frames -= 1;\n",
116+
" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
117+
"\n",
118+
" # ~~~~~~~~~~~~ read the total number of images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
119+
" N = 10; # specify the number of workers ... do this judiciously \n",
120+
" Ram_size = 30;\n",
121+
" memory_allocation = \"-Xmx\" + str(int (Ram_size/N)) + \"g\"; # N*memory alloc. not exceed the RAM size-1\n",
122+
" pool = mp.Pool(processes=N);\n",
123+
" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
124+
"\n",
125+
" #~~~~~~~~~~~~~~~~create the chunk firsts and lasts (tested robust) ~~~~~~~~~~~~~~~~~~~\n",
126+
" Chunk = (Total_Frames/N);\n",
127+
" frame_start = [];\n",
128+
" frame_finish =[];\n",
129+
" inp_for_starmap =[];\n",
130+
"\n",
131+
" for i in range(N):\n",
132+
" frame_start.append(int(np.floor(i*Chunk)+1));\n",
133+
" frame_finish.append(int(np.floor((i+1)*Chunk+1)));\n",
134+
" inp_for_starmap.append(tuple([Path,frame_start[i],frame_finish[i],memory_allocation]));\n",
135+
"\n",
136+
"    #~~~~~~~~~~~~~create a \"main\" function for running the parallelization code~~~~~~~~~~~~~\n",
137+
" #~~~~ the __main__ function is not necessary on mac and linux .. window 's crap!~~~~~~\n",
138+
"\n",
139+
" if __name__==\"__main__\":\n",
140+
"\n",
141+
" # ~~~~~~~~~~~~~~~~~~~ create a temporary folder in C: Drive ~~~~~~~~~~~~~~~~~~~~~~\n",
142+
" os.mkdir('C:/Temporary')\n",
143+
"\n",
144+
" start = time.perf_counter(); \n",
145+
" # creating N processes and executing them\n",
146+
" results = pool.starmap(pnp, inp_for_starmap);\n",
147+
" \n",
148+
" # convert every float64 to float32\n",
149+
" #for c in range(N):\n",
150+
" # results[c][results[c].select_dtypes(np.float64).columns] = results[c].select_dtypes(np.float64).astype(np.float32);\n",
151+
"\n",
152+
" # measure the run time\n",
153+
" finish = time.perf_counter();\n",
154+
"\n",
155+
"    # delete the temporary folder in C: drive\n",
156+
" shutil.rmtree('C:/Temporary');\n",
157+
" \n",
158+
" # closing the pool is very important!! creates problem with multiple running \n",
159+
" pool.close()\n",
160+
"\n",
161+
" # Transform the path name to the position for saving the .mat output\n",
162+
" parts = os.path.normpath(Path).split(os.sep)[:-1];\n",
163+
" parts.extend(['Analysis','1_Positions','Positions_NF.mat']);\n",
164+
" mat_file_name = '/'.join(parts);\n",
165+
"\n",
166+
" # Save the data in a .mat file \n",
167+
" dictn = {\"XYF\":np.concatenate(results)};\n",
168+
" sio.savemat(mat_file_name, dictn); \n",
169+
" \n",
170+
" # remove the large datasets from the memory and collect garbage \n",
171+
" del(results);\n",
172+
" del(dictn);\n",
173+
" gc.collect( );\n",
174+
" gc.collect(1);\n",
175+
" gc.collect(2);\n",
176+
" print(\"time taken by\",Path,\" is \", round((finish - start)/60),\"minutes\");"
177+
]
178+
},
179+
{
180+
"cell_type": "code",
181+
"execution_count": null,
182+
"id": "c6c95ca0",
183+
"metadata": {},
184+
"outputs": [],
185+
"source": []
186+
}
187+
],
188+
"metadata": {
189+
"kernelspec": {
190+
"display_name": "Python 3 (ipykernel)",
191+
"language": "python",
192+
"name": "python3"
193+
},
194+
"language_info": {
195+
"codemirror_mode": {
196+
"name": "ipython",
197+
"version": 3
198+
},
199+
"file_extension": ".py",
200+
"mimetype": "text/x-python",
201+
"name": "python",
202+
"nbconvert_exporter": "python",
203+
"pygments_lexer": "ipython3",
204+
"version": "3.9.7"
205+
},
206+
"latex_envs": {
207+
"LaTeX_envs_menu_present": true,
208+
"autoclose": true,
209+
"autocomplete": true,
210+
"bibliofile": "biblio.bib",
211+
"cite_by": "apalike",
212+
"current_citInitial": 1,
213+
"eqLabelWithNumbers": true,
214+
"eqNumInitial": 1,
215+
"hotkeys": {
216+
"equation": "Ctrl-E",
217+
"itemize": "Ctrl-I"
218+
},
219+
"labels_anchors": false,
220+
"latex_user_defs": false,
221+
"report_style_numbering": false,
222+
"user_envs_cfg": false
223+
}
224+
},
225+
"nbformat": 4,
226+
"nbformat_minor": 5
227+
}
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
# A function to detect particles in a frame (set).
2+
3+
# ~~~~~~~~~~~ Developed by @ Samadarshi 18.maart.2022. ~~~~~~~~~~~~~~~~~~
4+
#Points to remember:
5+
#1. This function does the following in the same order:
6+
# A> enhances contrast
7+
# B> applies a low-pass filter
8+
# C> finds the peaks
9+
# D> Saves the maxima (object detection) in a tsv file. This is presently done in
10+
# a "Temporary" folder, created & deleted during the parallelization step
11+
#2. The function has been built to run in parallel.
12+
#3. You can edit the macro to obtain your desired series of processing steps within this function.
13+
#4. This is a memory-mapped version of the Python code (confirm the elimination of the large-RAM dependence).
14+
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15+
16+
17+
# Update: there is a problem with the read and write statements in this code... do not improve; it does not do what it is supposed to do.
18+
# the garbage dumping still exists.
19+
20+
def process_NF_particles(path, frame_start, frame_finish, mem_alloc):

    '''
    Detect particle positions in a chunk of image frames using ImageJ
    (pyimagej) and stream them to a float32 binary file on disk.

    For each frame in [frame_start, frame_finish) an ImageJ macro inverts the
    image, enhances the contrast, applies a Gaussian blur (low-pass filter,
    sigma = 2) and runs "Find Maxima" (prominence = 7); the maxima list is
    saved to a temporary .tsv, read back with pandas, tagged with the frame
    number in an extra 'F' column, and appended to the on-disk chunk file.

    Param:-
    path: the folder which contains the set of images
    frame_start: the ID of the starting frame (name the frames in 6 digit IDs)
    frame_finish: the ID of the ending frame (exclusive)
    mem_alloc: JVM heap option string, e.g. "-Xmx3g" (the total across all
               workers must not exceed the machine's RAM)

    Returns:-
    [filename_memmap, new_len]: the on-disk location of this chunk's rows and
    the number of rows written.  The caller re-reads the file (e.g. via
    np.memmap / np.fromfile) knowing the column count of the results table.
    '''

    # import everything inside the worker so that each spawned process
    # (Windows uses the 'spawn' start method) is self-contained
    import time
    import numpy as np
    import pandas as pd
    import os
    import scyjava
    import imagej
    import gc

    # total heap memory allocation should not exceed the RAM size
    scyjava.config.add_options(mem_alloc)

    # create an ImageJ instance (kept alive for the whole chunk)
    ij = imagej.init('net.imagej:imagej:2.3.0', mode='interactive')

    # Set the batch mode to "true" so that the images do not pop up.
    A_pre = '\nsetBatchMode(true);\nopen("'

    # pre-bind df so the cleanup below is safe even for an empty chunk
    df = pd.DataFrame()

    # count the total number of frames in this chunk
    N = frame_finish - frame_start

    # run the timer
    start_time = time.time()

    # echo the start of a process
    print("Starting chunk", frame_start, "to", (frame_finish - 1))

    # define the main part of the macro (everything after the open(...) call)
    macro_main = '''.tif");
// ~~~~~~~~~~~~~~~~ modify the macro from here to suit your needs~~~~~~~~~~~~~~
// Invert the image
run("Invert");
// Enhance the contrast of the image
run("Enhance Contrast", "saturated=0.35");
// ~~~~~~~~~Apply the gaussian blur: low pass filter: sigma set at 2~~~~~~~~~~~
run("Gaussian Blur...", "sigma=2");
// find the peaks after applying the filter
run("Find Maxima...", "prominence=7 output=List");
// ~~~~~~~~~~~~~~~~~~~~~~~~Save the data as a csv file~~~~~~~~~~~~~~~~~~~~~~~~~
'''

    # define the temporary saving route for the per-frame results table
    basename = '''saveAs("Results", "'''
    ending = '''");\n'''
    filename = "C:/Temporary/Results_" + str(frame_start) + "_to_" + str(frame_finish) + ".tsv"
    full_name = basename + filename + ending

    # name the location of the on-disk array holding this chunk's rows
    filename_memmap = "C:/Temporary/Array_" + str(frame_start) + "_to_" + str(frame_finish) + ".array"

    # BUGFIX: the previous version re-opened the array with
    # np.memmap(mode='w+') on every iteration, which truncates the backing
    # file and destroys the rows written for all earlier frames; it also
    # counted an initial (1, 4) zero row that was never filled.  Here each
    # frame's rows are appended to the raw binary file instead — the on-disk
    # layout (row-major float32) is identical to what the memmap produced,
    # and only one frame's data is held in RAM at a time.
    new_len = 0
    with open(filename_memmap, 'wb') as chunk_file:

        # loop over the frames of this chunk
        for i in range(frame_start, frame_finish):

            # build the 6-digit image ID string
            n = str(i).zfill(6)

            # find the positions of the particles using the macro
            result = ij.py.run_macro(A_pre + path + n + macro_main + full_name)

            # read the results table into a pandas dataframe
            # (float32 drops precision for less memory ... trade-off)
            df = pd.read_csv(filename, dtype=np.float32, sep='\t')

            # add the frame number as an extra column
            df['F'] = i * np.ones(len(df.index), dtype=np.float32)

            # append this frame's rows (row-major float32) to the chunk file
            rows = np.ascontiguousarray(df.to_numpy(dtype=np.float32))
            rows.tofile(chunk_file)
            new_len += rows.shape[0]

            # print progress status of the process
            print("chunk", frame_start, "to", (frame_finish - 1), ":",
                  round((i - frame_start) / N * 100), " percent complete \r")

    gc.collect()

    # close the "Results" window
    macro_end = '''
close("Results");
'''
    result = ij.py.run_macro(macro_end)
    end_time = time.time()
    # max(N, 1) guards the empty-chunk case against division by zero
    print("the time lapse is", (end_time - start_time) / max(N, 1), "per frame")

    # remove the imageJ object ... do it or the kernel crash can be very severe
    del ij
    del df
    gc.collect()

    # return the location of the chunk and the number of rows in the chunk
    return [filename_memmap, new_len]
138+

0 commit comments

Comments
 (0)