# workflow.py
import argparse
import csv
import json
import os
import time
from datetime import datetime as dt
import nibabel as nib
import numpy as np
import pandas as pd
'''ROI labels from FreeSurferColorLUT.txt:

10 Left-Thalamus 0 118 14 0
49 Right-Thalamus 0 118 14 0
17 Left-Hippocampus 220 216 20 0
53 Right-Hippocampus 220 216 20 0
18 Left-Amygdala 103 255 255 0
54 Right-Amygdala 103 255 255 0
11 Left-Caudate 122 186 220 0
50 Right-Caudate 122 186 220 0
12 Left-Putamen 236 13 176 0
51 Right-Putamen 236 13 176 0
13 Left-Pallidum 12 48 255 0
52 Right-Pallidum 13 48 255 0
26 Left-Accumbens-area 255 165 0 0
58 Right-Accumbens-area 255 165 0 0
28 Left-VentralDC 165 42 42 0
60 Right-VentralDC 165 42 42 0
15 4th-Ventricle 42 204 164 0
7 Left-Cerebellum-White-Matter 220 248 164 0
8 Left-Cerebellum-Cortex 230 148 34 0
46 Right-Cerebellum-White-Matter 220 248 164 0
47 Right-Cerebellum-Cortex 230 148 34 0
24 CSF 60 60 60 0
'''
roi_labels = {
    "Thalamus": [10, 49],  # Combines left (10) and right (49)
    "Left-Hippocampus": [17],
    "Right-Hippocampus": [53],
    "Left-Amygdala": [18],
    "Right-Amygdala": [54],
    "Left-Caudate": [11],
    "Right-Caudate": [50],
    "Left-Putamen": [12],
    "Right-Putamen": [51],
    "Left-Pallidum": [13],
    "Right-Pallidum": [52],
    "Left-Accumbens-area": [26],
    "Right-Accumbens-area": [58],
    "Left-VentralDC": [28],
    "Right-VentralDC": [60],
    "4th-Ventricle": [15],
    "Left-Cerebellum": [8, 7],  # Combines cortex (8) and white matter (7)
    "Right-Cerebellum": [47, 46],  # Combines cortex (47) and white matter (46)
    "CSF": [24]
}
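# These integer labels are the voxel values stored in mri/aseg.mgz (and the
# SegId column of stats/aseg.stats), following FreeSurferColorLUT numbering.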
# Define the columns of interest for each ROI
columns_of_interest = ['StudyID', 'Age', 'Sex']  # Per-subject metadata
for roi in roi_labels:
    columns_of_interest.extend([  # Five measurement columns per ROI
        f"{roi}_Volume_mm3",
        f"{roi}_ROI_Volume_mm3",
        f"{roi}_Centroid_X",
        f"{roi}_Centroid_Y",
        f"{roi}_Centroid_Z"
    ])
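# For example, the Thalamus contributes "Thalamus_Volume_mm3",
# "Thalamus_ROI_Volume_mm3" and the three "Thalamus_Centroid_*" columns.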
def run_recon_all(input_mri, study_id, studies_output):
    '''Step 1: Run FreeSurfer's recon-all'''
    cmd1 = f"recon-all -i {input_mri} -s {study_id} -all -sd {studies_output}"
    print(f"[..] Running command: {cmd1}")
    os.system(cmd1)
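# Note: os.system blocks until recon-all exits (typically several hours) and
# only returns the exit status. A stricter variant is sketched below; it is
# not used by the pipeline and assumes recon-all is on PATH:
def run_recon_all_checked(input_mri, study_id, studies_output):
    '''Like run_recon_all, but raises CalledProcessError on failure.'''
    import subprocess
    subprocess.run(['recon-all', '-i', input_mri, '-s', study_id,
                    '-all', '-sd', studies_output], check=True)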
def extract_volumes(study_id, studies_output):
    '''Step 2: Extract volumes of segmented structures using fs_stat_2_pd'''
    volumes = {}
    asegstats_file_names = ['aseg.stats']
    for asegstats_file_name in asegstats_file_names:
        asegstats_file = os.path.join(
            studies_output, study_id, 'stats', asegstats_file_name)
        print(f"[..] Looking for aseg.stats file at: {asegstats_file}")
        if not os.path.exists(asegstats_file):
            print(f"[!!] File not found: {asegstats_file}")
            return {}
        aseg_data = fs_stat_2_pd(asegstats_file)
        for index, row in aseg_data.iterrows():
            structure = row['StructName']
            volume = float(row['Volume_mm3'])
            volumes[structure] = volume
    return volumes
def extract_coordinates(aseg, structure_label):
    '''Step 3: Extract coordinates of structures and convert to RAS'''
    affine = aseg.affine
    seg_data = aseg.get_fdata()
    coords = np.argwhere(seg_data == structure_label)
    ras_coords = nib.affines.apply_affine(affine, coords)
    return ras_coords
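# apply_affine maps each voxel index (i, j, k) to millimetres via
# ras = affine[:3, :3] @ [i, j, k] + affine[:3, 3], so all centroids and
# distances computed downstream are expressed in scanner RAS space.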
def extract_roi_info(study_id, roi_labels, studies_output):
    '''Step 4: Extract information for all ROIs and compute centroids'''
    roi_info = {}
    aseg_file = os.path.join(studies_output, study_id, 'mri', 'aseg.mgz')
    if not os.path.exists(aseg_file):
        print(f"[!!] File not found: {aseg_file}")
        return {}
    aseg = nib.load(aseg_file)
    # For a FreeSurfer-conformed 1 mm isotropic aseg, voxel_volume is 1 mm^3
    voxel_volume = np.prod(aseg.header.get_zooms())
    for roi, labels in roi_labels.items():
        coords = []
        print(f"[..] Extracting data for {roi} (labels: {labels})")
        for lbl in labels:
            coords.extend(extract_coordinates(aseg, lbl))
        coords = np.array(coords)
        volume = len(coords) * voxel_volume if len(coords) > 0 else 0
        centroid = np.mean(coords, axis=0) if len(coords) > 0 else None
        roi_info[roi] = {
            'coordinates': coords,
            'volume': volume,
            'centroid': centroid
        }
    return roi_info
def cosine_similarity(v1, v2):
    """Calculate the row-wise cosine similarity between two arrays of vectors."""
    # Dot product per row
    dot_product = np.sum(v1 * v2, axis=1)
    # Magnitudes per row
    magnitude_v1 = np.linalg.norm(v1, axis=1)
    magnitude_v2 = np.linalg.norm(v2, axis=1)
    # Cosine similarity
    cosine_sim = dot_product / (magnitude_v1 * magnitude_v2)
    return cosine_sim
def cosine_distance(v1, v2):
    """Cosine distance: 1 minus the cosine similarity."""
    return 1 - cosine_similarity(v1, v2)
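# e.g. cosine_distance(np.array([[1.0, 0.0]]), np.array([[0.0, 1.0]]))
# returns array([1.]): orthogonal vectors have similarity 0, distance 1.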
def minkowski_distance(v1, v2, p=2):
    """Calculate the row-wise Minkowski distance between two arrays of vectors."""
    return np.sum(np.abs(v1 - v2) ** p, axis=1) ** (1 / p)
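# e.g. minkowski_distance(np.array([[0.0, 0.0]]), np.array([[3.0, 4.0]]))
# returns array([5.]); with the default p=2 this is the Euclidean distance.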
def parse_csv(file_path, sep=';'):
    """Reads a CSV file and returns a DataFrame."""
    return pd.read_csv(file_path, sep=sep)
def initialize_dataframe(columns):
    """Initializes an empty DataFrame with the specified columns and correct dtypes."""
    dtype_dict = {col: float for col in columns if col not in [
        'StudyID', 'Age', 'Sex']}
    dtype_dict.update({'StudyID': str, 'Age': int, 'Sex': str})
    return pd.DataFrame(columns=columns).astype(dtype_dict)
def assemble_results(csv_files, input_path):
    """Assembles results from multiple CSV files into a single DataFrame."""
    # Initialize an empty DataFrame with the specified columns
    result_df = initialize_dataframe(columns_of_interest)
    for file in csv_files:
        file_path = os.path.join(input_path, file)
        df = parse_csv(file_path)
        for _, row in df.iterrows():
            if 'ROI' not in row:
                print("[!!] Skipping row without an 'ROI' field:")
                print(row)
                continue
            roi = row['ROI']
            if roi in roi_labels:
                study_id = row['StudyID']
                age = row['Age']
                sex = row['Sex']
                # Check if this StudyID already exists in the result DataFrame
                if (result_df['StudyID'] == study_id).any():
                    # Update the existing row
                    idx = result_df['StudyID'] == study_id
                    result_df.loc[idx, f"{roi}_Volume_mm3"] = row['Volume_mm3']
                    result_df.loc[idx, f"{roi}_ROI_Volume_mm3"] = row['ROI_Volume_mm3']
                    result_df.loc[idx, f"{roi}_Centroid_X"] = row['ROI_Centroid_X']
                    result_df.loc[idx, f"{roi}_Centroid_Y"] = row['ROI_Centroid_Y']
                    result_df.loc[idx, f"{roi}_Centroid_Z"] = row['ROI_Centroid_Z']
                else:
                    # Create a new row filled with NaN
                    new_row = pd.Series(index=result_df.columns, dtype=object)
                    new_row['StudyID'] = study_id
                    new_row['Age'] = age
                    new_row['Sex'] = sex
                    new_row[f"{roi}_Volume_mm3"] = row['Volume_mm3']
                    new_row[f"{roi}_ROI_Volume_mm3"] = row['ROI_Volume_mm3']
                    new_row[f"{roi}_Centroid_X"] = row['ROI_Centroid_X']
                    new_row[f"{roi}_Centroid_Y"] = row['ROI_Centroid_Y']
                    new_row[f"{roi}_Centroid_Z"] = row['ROI_Centroid_Z']
                    result_df = pd.concat(
                        [result_df, pd.DataFrame([new_row])], ignore_index=True)
    return result_df
def aggregate_results(df):
    """Aggregates the results by selecting one volume per ROI and calculating distances from the Thalamus."""
    aggregated_data = df[['StudyID', 'Age', 'Sex']].copy()
    for roi in roi_labels:
        # Prefer the aseg.stats volume; fall back to the voxel-count volume
        volume_column = f"{roi}_Volume_mm3"
        roi_volume_column = f"{roi}_ROI_Volume_mm3"
        aggregated_data[roi + '_Volume'] = np.where(
            df[volume_column].notna(), df[volume_column], df[roi_volume_column])
        # Second copy, normalized by thalamic volume later in normalize_volumes()
        aggregated_data[roi + '_THNorm_Volume'] = aggregated_data[roi + '_Volume']
        # Calculate centroid distances from the Thalamus
        if roi != "Thalamus":
            v1 = df[[roi + '_Centroid_X', roi + '_Centroid_Y',
                     roi + '_Centroid_Z']].values
            v2 = df[['Thalamus_Centroid_X', 'Thalamus_Centroid_Y',
                     'Thalamus_Centroid_Z']].values
            # Assign both distance columns at once
            aggregated_data = aggregated_data.assign(
                **{
                    roi + '_THNorm_Minkowski2': minkowski_distance(v1, v2, 2),
                    roi + '_THNorm_Cosine': cosine_distance(v1, v2),
                }
            )
    # Remove columns with all NaN values
    aggregated_data = aggregated_data.dropna(axis=1, how='all')
    return aggregated_data
def normalize_volumes(df):
    """Normalize ROI volumes by dividing by the Thalamus volume."""
    for roi in roi_labels:
        if roi != "Thalamus":
            df = df.drop(columns=[f"{roi}_Volume"])
            df[f"{roi}_THNorm_Volume"] = (df[f"{roi}_THNorm_Volume"]
                                          / df["Thalamus_THNorm_Volume"])
    df = df.drop(columns=["Thalamus_THNorm_Volume"])
    return df
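# After normalization, a value such as Left-Hippocampus_THNorm_Volume == 0.5
# would mean the left hippocampus is half the combined (left + right)
# thalamic volume; the reference column itself is dropped.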
def fs_stat_2_pd(input_file):
    '''Utility function to convert a FreeSurfer stats file to a DataFrame'''
    with open(input_file, 'rt') as f:
        reader = csv.reader(f, delimiter=' ')
        csv_list = []
        for line in reader:
            # Join and split to collapse repeated spaces; skip comment lines
            cleaned_line = ' '.join(line).split()
            if len(cleaned_line) > 1 and not cleaned_line[0].startswith('#'):
                csv_list.append(cleaned_line)
    # Define the header explicitly (the file's own header is a '#' comment)
    header = ['Index', 'SegId', 'NVoxels', 'Volume_mm3', 'StructName',
              'normMean', 'normStdDev', 'normMin', 'normMax', 'normRange']
    # Create the DataFrame
    df = pd.DataFrame(csv_list, columns=header)
    # Convert the numeric columns
    numeric_columns = ['Index', 'SegId', 'NVoxels', 'Volume_mm3',
                       'normMean', 'normStdDev', 'normMin', 'normMax', 'normRange']
    df[numeric_columns] = df[numeric_columns].apply(pd.to_numeric)
    return df
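# Usage sketch (path and subject id are illustrative):
# aseg_df = fs_stat_2_pd('studies_output/sub01/stats/aseg.stats')
# aseg_df.loc[aseg_df['StructName'] == 'Left-Hippocampus', 'Volume_mm3']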
def get_structs_csv_files(path):
    """Returns the list of *_structs.csv files in the given directory."""
    return [f for f in os.listdir(path) if f.endswith('_structs.csv')]
def save_results(result_df, output_path):
    """Saves the assembled DataFrame to a semicolon-separated CSV file."""
    result_df.to_csv(output_path, index=False, sep=';')
def save_coordinates_to_html(roi_info, output_file):
    '''Save the coordinates to an HTML file with a Three.js visualization'''
    def serialize_coordinates(coords):
        return coords.tolist() if isinstance(coords, np.ndarray) else coords
    serialized_roi_info = {
        roi: {
            'coordinates': serialize_coordinates(info['coordinates']),
            'volume': info['volume'],
            'centroid': serialize_coordinates(info['centroid'])
        }
        for roi, info in roi_info.items()
    }
    html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>3D ROI Visualization</title>
</head>
<body style="margin: 0; padding: 0">
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script>
const roiData = {roi_data};
const colors = ["#caa331", "#6f71d9", "#96b642", "#563688", "#44cc7c", "#bc51a8", "#59ac4c", "#c584d5", "#396d22", "#da6295", "#48b989", "#e1556e", "#33d4d1", "#d85a49", "#5e87d3", "#c37127", "#892b60", "#76b870", "#ac4258", "#a2b864", "#ad4248", "#ccad52", "#984126", "#7a7020", "#ce9157"]
function init() {{
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
let iRoi = 0;
const sections = ['coordinates', 'centroid']
for (const roi in roiData) {{
sections.forEach((roiSection) => {{
let coords = roiData[roi][roiSection];
if (roiSection === 'centroid') {{
coords = [roiData[roi][roiSection]];
}}
// The coordinates are RAS millimetres (x: right, y: anterior, z: superior),
// while Three.js renders with +Y as "up". Permute the axes so the
// superior (S) axis maps onto Three.js's vertical Y axis:
coords = coords.map(coord => [coord[1], coord[2], coord[0]]);
if (coords.filter(Boolean).length === 0) {{
return;
}}
const geometry = new THREE.BufferGeometry();
const points = [];
for (let i = 0; i < coords.length; i++) {{
points.push(coords[i][0], coords[i][1], coords[i][2]);
}}
const vertices = new Float32Array(points);
geometry.setAttribute('position', new THREE.BufferAttribute(vertices, 3));
let material = new THREE.PointsMaterial({{ color: colors[iRoi], size: 1, transparent: true, opacity: 0.5, blending: THREE.AdditiveBlending, }});
if (roiSection === 'centroid') {{
// Set point size
material = new THREE.PointsMaterial({{ color: colors[iRoi], size: 5, }});
}}
const pointCloud = new THREE.Points(geometry, material);
scene.add(pointCloud);
if (roiSection === 'centroid') {{
// Add label
const centroidPosition = new THREE.Vector3(vertices[0], vertices[1], vertices[2]);
// Create a canvas element and draw the label
const canvas = document.createElement('canvas');
const context = canvas.getContext('2d');
context.clearRect(0, 0, canvas.width, canvas.height);
context.font = '30px Arial';
context.fillStyle = colors[iRoi];
context.fillText(roi, 0, 30);
const texture = new THREE.CanvasTexture(canvas);
const spriteMaterial = new THREE.SpriteMaterial({{ map: texture, transparent: true, depthWrite: false, }});
const sprite = new THREE.Sprite(spriteMaterial);
sprite.position.copy(centroidPosition);
const scaleFactor = camera.fov / 30;
sprite.scale.set(10 * scaleFactor, 5 * scaleFactor, 1);
scene.add(sprite);
}}
}})
iRoi++;
}};
function onDocumentMouseWheel(event) {{
camera.fov += event.deltaY * 0.05;
camera.fov = Math.max(10, Math.min(100, camera.fov));
camera.updateProjectionMatrix();
}}
document.addEventListener('wheel', onDocumentMouseWheel, false);
let isMouseDown = false;
let startY = 0;
let cameraTarget = new THREE.Vector3(90, 0, 0);
function onDocumentMouseDown(event) {{
isMouseDown = true;
startY = event.clientY;
}}
document.addEventListener('mousedown', onDocumentMouseDown, false);
function onDocumentMouseMove(event) {{
if (!isMouseDown) return;
const deltaY = event.clientY - startY;
startY = event.clientY;
const angle = deltaY * 0.005;
camera.position.y += angle * 30;
camera.lookAt(cameraTarget);
}}
document.addEventListener('mousemove', onDocumentMouseMove, false);
function onDocumentMouseUp() {{
isMouseDown = false;
}}
document.addEventListener('mouseup', onDocumentMouseUp, false);
function animate() {{
const r = Date.now() * 0.0001;
camera.position.x = 180 * Math.cos(r);
camera.position.z = 180 * Math.sin(r);
camera.lookAt(scene.position);
requestAnimationFrame(animate);
renderer.render(scene, camera);
}}
scene.fog = new THREE.FogExp2( 0x000000, 0.001 );
camera.position.z = 40;
camera.position.y = 0;
animate();
}}
init();
</script>
</body>
</html>
    """
    roi_data_json = json.dumps(serialized_roi_info)
    html_content = html_template.format(roi_data=roi_data_json)
    with open(output_file, 'w') as f:
        f.write(html_content)
def main():
    '''Pipeline execution'''
    parser = argparse.ArgumentParser(
        description='FreeSurfer-based ROI volume and centroid extraction pipeline')
    parser.add_argument('--mri', required=True,
                        help='path to the input MRI (*.nii) to process')
    parser.add_argument('--id', required=True,
                        help='subject/study identifier')
    parser.add_argument('--age', required=True, type=int,
                        help='age of the subject')
    parser.add_argument('--sex', required=True,
                        help='sex of the subject')
    parser.add_argument('--stud_out', required=False, default='studies_output',
                        help='optional studies output path (default: studies_output)')
    parser.add_argument('--csv_out', required=False, default='processed_data',
                        help='optional CSV output path (default: processed_data)')
    args = parser.parse_args()
    path_mri = args.mri
    study_id = args.id
    age = args.age
    sex = args.sex
    studies_output = args.stud_out
    csv_out = args.csv_out
    os.makedirs(studies_output, exist_ok=True)
    os.makedirs(csv_out, exist_ok=True)
    # Print the inputs to verify
    print("[ok] Input MRI:", path_mri)
    print("[ok] Study ID:", study_id)
    print("[ok] Output directory:", studies_output)
    # Run recon-all (os.system blocks until the command exits)
    run_recon_all(path_mri, study_id, studies_output)
    # Safety net: recon-all touches scripts/recon-all.done on success.
    # Note: this loop never exits if recon-all failed.
    recon_all_output_dir = os.path.join(studies_output, study_id)
    while not os.path.exists(os.path.join(recon_all_output_dir, 'scripts', 'recon-all.done')):
        print("[..] Waiting for recon-all to complete...")
        time.sleep(60)
    volumes = extract_volumes(study_id, studies_output)
    roi_info = extract_roi_info(study_id, roi_labels, studies_output)
    # Write the results to a CSV file
    now_date = dt.now().strftime("%Y%m%d%H%M%S")
    output_csv = os.path.join(csv_out, f'{now_date}_{study_id}_structs.csv')
    fieldnames = ['StudyID', 'Age', 'Sex', 'ROI', 'Volume_mm3', 'ROI_Centroid_X',
                  'ROI_Centroid_Y', 'ROI_Centroid_Z', 'ROI_Volume_mm3']
    with open(output_csv, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=';')
        writer.writeheader()
        # Combine volumes from aseg.stats and the ROI information
        for roi in set(volumes.keys()).union(roi_info.keys()):
            volume = volumes.get(roi, None)
            info = roi_info.get(roi, {})
            centroid = info.get('centroid', None)
            roi_volume = info.get('volume', None)
            centroid_x, centroid_y, centroid_z = centroid if centroid is not None else (
                None, None, None)
            writer.writerow({
                'StudyID': study_id,
                'Age': age,
                'Sex': sex,
                'ROI': roi,
                'Volume_mm3': volume,
                'ROI_Centroid_X': centroid_x,
                'ROI_Centroid_Y': centroid_y,
                'ROI_Centroid_Z': centroid_z,
                'ROI_Volume_mm3': roi_volume
            })
    print(f"[ok] Raw structs written to {output_csv}")
    # Get the list of per-subject CSV files in the output directory
    csv_files = get_structs_csv_files(csv_out)
    # Assemble the results from the CSV files
    result_df = assemble_results(csv_files, csv_out)
    # Aggregate the results
    aggregated_df = aggregate_results(result_df)
    # Normalize volumes
    normalized_df = normalize_volumes(aggregated_df)
    # Save the normalized results
    output_results_csv = os.path.join(csv_out, f'{now_date}_norm.csv')
    save_results(normalized_df, output_results_csv)
    # Save the 3D coordinate visualization
    output_results_points = os.path.join(
        csv_out, f'{now_date}_{study_id}_s3d.html')
    save_coordinates_to_html(roi_info, output_results_points)
    print(f"[ok] 3d visualization written to {output_results_points}")
    # Finish and inform
    print(f"[ok] Normalized results written to {output_results_csv}")
    print("[ok] Processing is finished.")
if __name__ == '__main__':
    main()
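# Example invocation (file names are illustrative):
#   python workflow.py --mri sub01_T1.nii --id sub01 --age 42 --sex M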