From 821415257b2b91e28e9338bd9d4597867515bae1 Mon Sep 17 00:00:00 2001 From: Yasser Aleman Gomez Date: Fri, 8 Mar 2024 20:03:42 +0100 Subject: [PATCH] Running black to correct the code and creating the first version with minor changes --- .gitignore | 1 + clabtoolkit/__init__.py | 4 +- clabtoolkit/bidstools.py | 37 +- clabtoolkit/dicomtools.py | 6 +- clabtoolkit/dwitools.py | 2 +- clabtoolkit/examples.py | 4 - clabtoolkit/freesurfertools.py | 471 +++++++++++++++++--- clabtoolkit/misctools.py | 14 +- clabtoolkit/parcellationtools.py | 46 +- clabtoolkit/surfacetools.py | 724 ------------------------------- docs/conf.py | 58 +-- setup.cfg | 2 +- setup.py | 42 +- 13 files changed, 534 insertions(+), 877 deletions(-) diff --git a/.gitignore b/.gitignore index 4c915d1..196a0fa 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,7 @@ target/ # Jupyter Notebook .ipynb_checkpoints +testing_notebook.ipynb # pyenv .python-version diff --git a/clabtoolkit/__init__.py b/clabtoolkit/__init__.py index ec0961d..4a39905 100644 --- a/clabtoolkit/__init__.py +++ b/clabtoolkit/__init__.py @@ -1,5 +1,5 @@ """Top-level package for Connectomics Lab Toolkit.""" __author__ = """Yasser Alemán-Gómez""" -__email__ = 'yasseraleman@protonmail.com' -__version__ = '0.0.1' +__email__ = "yasseraleman@protonmail.com" +__version__ = "0.1.0" diff --git a/clabtoolkit/bidstools.py b/clabtoolkit/bidstools.py index c24cc71..cdf0bfc 100755 --- a/clabtoolkit/bidstools.py +++ b/clabtoolkit/bidstools.py @@ -5,15 +5,20 @@ # This function copies the BIDs folder and its derivatives for e given subjects to a new location def _copy_bids_folder( - bids_dir: str, out_dir: str, fold2copy: list = ["anat"], subjs2copy: str = None, deriv_dir: str = None, -include_derivatives: bool = False): + bids_dir: str, + out_dir: str, + fold2copy: list = ["anat"], + subjs2copy: str = None, + deriv_dir: str = None, + include_derivatives: bool = False, +): """ Copy full bids folders @params: bids_dir - Required : BIDs dataset directory: out_dir - Required : Output directory: fold2copy - Optional : List of folders to copy: default = ['anat'] - subjs2copy - Optional : List of subjects to copy: + subjs2copy - Optional : List of subjects to copy: deriv_dir - Optional : Derivatives directory: default = None include_derivatives - Optional : Include derivatives folder: default = False """ @@ -28,12 +33,12 @@ def _copy_bids_folder( subj_ids.sort() else: subj_ids = subjs2copy - + # Selecting the derivatives folder if include_derivatives: if deriv_dir is None: deriv_dir = os.path.join(bids_dir, "derivatives") - + if not os.path.isdir(deriv_dir): # Lunch a warning message if the derivatives folder does not exist print("WARNING: The derivatives folder does not exist.") @@ -83,7 +88,9 @@ def _copy_bids_folder( directories = os.listdir(ses_dir) fold2copy = [] for directory in directories: - if not directory.startswith(".") and os.path.isdir(os.path.join(ses_dir, directory)): + if not directory.startswith(".") and os.path.isdir( + os.path.join(ses_dir, directory) + ): print(directory) fold2copy.append(directory) @@ -108,10 +115,14 @@ def _copy_bids_folder( for pipe_dir in der_pipe_folders: if os.path.isdir(pipe_dir): - out_pipe_dir = os.path.join(out_dir, "derivatives", os.path.basename(pipe_dir)) + out_pipe_dir = os.path.join( + out_dir, "derivatives", os.path.basename(pipe_dir) + ) pipe_indiv_subj_in = os.path.join(pipe_dir, subj_id, ses_id) - pipe_indiv_subj_out = os.path.join(out_pipe_dir, subj_id, ses_id) + pipe_indiv_subj_out = os.path.join( + out_pipe_dir, 
subj_id, ses_id + ) if os.path.isdir(pipe_indiv_subj_in): try: @@ -119,11 +130,15 @@ def _copy_bids_folder( os.makedirs(pipe_indiv_subj_out, exist_ok=True) # Copying the folder - shutil.copytree(pipe_indiv_subj_in, pipe_indiv_subj_out, dirs_exist_ok=True) + shutil.copytree( + pipe_indiv_subj_in, + pipe_indiv_subj_out, + dirs_exist_ok=True, + ) except: fail_deriv.append(pipe_indiv_subj_in) - + # Print the failed sessions and derivatives print(" ") if fail_sess: @@ -137,5 +152,5 @@ def _copy_bids_folder( for i in fail_deriv: print(i) print(" ") - + print("End of copying the files.") diff --git a/clabtoolkit/dicomtools.py b/clabtoolkit/dicomtools.py index a2c33cd..ed8a884 100755 --- a/clabtoolkit/dicomtools.py +++ b/clabtoolkit/dicomtools.py @@ -3,11 +3,12 @@ from glob import glob import subprocess + def sum(a, b): return a + b -def _uncompress_dicom_session(dic_dir: str, subj_ids = None): +def _uncompress_dicom_session(dic_dir: str, subj_ids=None): """ Uncompress session folders @params: @@ -31,7 +32,6 @@ def _uncompress_dicom_session(dic_dir: str, subj_ids = None): subj_ids = [x.strip() for x in subj_ids] elif not isinstance(subj_ids, list): raise ValueError("The subj_ids parameter must be a list or a string") - # Failed sessions fail_sess = [] @@ -83,7 +83,7 @@ def _uncompress_dicom_session(dic_dir: str, subj_ids = None): print("End of the uncompression process.") -def _compress_dicom_session(dic_dir: str, subj_ids = None): +def _compress_dicom_session(dic_dir: str, subj_ids=None): """ Compress session folders @params: diff --git a/clabtoolkit/dwitools.py b/clabtoolkit/dwitools.py index a10185f..5dac53c 100755 --- a/clabtoolkit/dwitools.py +++ b/clabtoolkit/dwitools.py @@ -5,7 +5,7 @@ # This function removes the B0s volumes located at the end of the diffusion 4D volume. -def _remove_empty_dwi_Volume(dwifile): +def _remove_empty_dwi_Volume(dwifile: str): """ Remove the B0s volumes located at the end of the diffusion 4D volume. 
@params: diff --git a/clabtoolkit/examples.py b/clabtoolkit/examples.py index a3a22c2..c2062fa 100644 --- a/clabtoolkit/examples.py +++ b/clabtoolkit/examples.py @@ -1,6 +1,3 @@ - - - # _copy_bids_folder('/media/HPCdata/Mindfulness/', '/media/yaleman/HagmannHDD/Test/',["anat", "dwi"], ["sub-S001"], include_derivatives=True, deriv_dir='/media/HPCdata/Mindfulness/derivatives') # _uncompress_dicom_session('/media/yaleman/Database/IMAGING-PROJECTS/Dicom') @@ -24,4 +21,3 @@ # freelut = '/media/COSAS/Yasser/Work2Do/ReconVertDatabase/derivatives/chimera-atlases/sub-CHUVA001/ses-V2/anat/sub-CHUVA001_ses-V2_run-1_space-orig_atlas-chimeraLFMIIIFIF_desc-scale5growwm_dseg.lut' # fsllut = '/home/yaleman/test.fsllut' - diff --git a/clabtoolkit/freesurfertools.py b/clabtoolkit/freesurfertools.py index 8fb6166..9273b98 100755 --- a/clabtoolkit/freesurfertools.py +++ b/clabtoolkit/freesurfertools.py @@ -1,71 +1,356 @@ import os +import time +import subprocess + import numpy as np import nibabel as nib -from glob import glob -import subprocess -def _annot2gcs( - annot_folder: str, - gcs_folder: str, - freesurfer_dir: str = "/opt/freesurfer/subjects/", -): + +class AnnotParcellation: """ - Convert FreeSurfer annot files to gcs files - @params: - annot_folder - Required : Folder containing the annot files: - gcs_folder - Required : Output folder: - freesurfer_dir - Optional : FreeSurfer directory: + This class contains methods to work with FreeSurfer annot files + + # Implemented methods: + # - Correct the parcellation by refilling the vertices from the cortex label file that do not have a label in the annotation file + # - Convert FreeSurfer annot files to gcs files + + # Methods to be implemented: + # Grouping regions to create a coarser parcellation + # Removing regions from the parcellation + # Correct parcellations by removing small clusters of vertices labeled inside another region + """ - # Create te gcs folder if it does not exist - if not os.path.exists(gcs_folder): - os.makedirs(gcs_folder) + def __init__(self, annot_file: str): - os.environ["SUBJECTS_DIR"] = freesurfer_dir - any_annots = glob(os.path.join(annot_folder, "*.annot")) - out_gcs = [] - for annot_file in any_annots: - print(annot_file) - annot_name = os.path.basename(annot_file) - annot_name = annot_name.replace(".annot", "") - hemi = annot_name[0:2] + self.annotfile = annot_file + + # Verify if the annot file exists + if not os.path.exists(self.annotfile): + raise ValueError("The annot file does not exist") + + # Extracting the filename, folder and name + self.annotfolder = os.path.dirname(self.annotfile) + self.annotname = os.path.basename(self.annotfile) + + # Detecting the hemisphere + annot_name = self.annotname.lower() + temp_name = annot_name.replace(".annot", "").lower() + + # Find in the string annot_name if it is lh. or rh. + if "lh." in temp_name: + hemi = "lh" + elif "rh." in temp_name: + hemi = "rh" + elif "hemi-l" in temp_name: + hemi = "lh" + elif "hemi-r" in temp_name: + hemi = "rh" + else: + hemi = None + raise ValueError( + "The hemisphere could not be extracted from the annot filename. 
Please provide it as an argument" + ) + + self.hemi = hemi - print(annot_name) # Read the annot file using nibabel - sdata = nib.freesurfer.io.read_annot(annot_file) + codes, reg_table, reg_names = nib.freesurfer.io.read_annot(self.annotfile) - # Read the colors from lh_annot - codes = sdata[0] - colors = sdata[1][1:, 0:3] - stnames = sdata[2][1:] + # Correcting region names + reg_names = [name.decode("utf-8") for name in reg_names] - # Create the lookup table for the right hemisphere - luttable = [ - "{:<4} {:<40} {:>3} {:>3} {:>3} {:>3}".format( - 0, "ctx-unknown", 250, 250, 250, 0 + # Storing the codes, colors and names in the object + self.codes = codes + self.regtable = reg_table + self.regnames = reg_names + + def _correct_names(self, prefix: str = None, sufix: str = None, lower: bool = False): + """ + Correcting region names + @params: + prefix - Optional : Add prefix to the region names: + sufix - Optional : Add sufix to the region names: + lower - Optional : Lower the region names. Default is False: + """ + + # Add prefix and sufix to the region names + if prefix is not None: + # If temp_name do not starts with ctx- then add it + self.regnames = [ + name if name.startswith(prefix) else prefix + "{}".format(name) + for name in self.regnames + ] + + if sufix is not None: + # If temp_name do not ends with - then add it + self.regnames = [ + name if name.endswith(sufix) else "{}".format(name) + sufix + for name in self.regnames + ] + + if lower: + self.regnames = [name.lower() for name in self.regnames] + + def _save_annotation(self, out_file: str = None): + """ + Save the annotation file + @params: + out_file - Required : Output annotation file: + """ + + if out_file is None: + out_file = os.path.join(self.annotfolder, self.annotname) + + # If the directory does not exist then create it + temp_dir = os.path.dirname(out_file) + if not os.path.exists(temp_dir): + os.makedirs(temp_dir) + + # Save the annotation file + nib.freesurfer.io.write_annot( + out_file, self.codes, self.regtable, self.regnames + ) + + def _fill_parcellation( + self, label_file: str, surf_file: str, corr_annot: str = None + ): + """ + Correct the parcellation by refilling the vertices from the cortex label file that do not have a label in the annotation file. + @params: + label_file - Required : Label file: + surf_file - Required : Surface file: + corr_annot - Optional : Corrected annotation file. If not provided, it will be saved with the same filename as the original annotation file: + + Returns + ------- + corr_annot: str + Corrected annotation file + + """ + + # Auxiliary variables for the progress bar + # LINE_UP = '\033[1A' + # LINE_CLEAR = '\x1b[2K' + + # Get the vertices from the cortex label file that do not have a label in the annotation file + + # If the surface file does not exist, raise an error, otherwise load the surface + if os.path.isfile(surf_file): + vertices, faces = nib.freesurfer.read_geometry(surf_file) + else: + raise ValueError( + "Surface file not found. Annotation, surface and cortex label files are mandatory to correct the parcellation." ) - ] - for roi_pos, roi_name in enumerate(stnames): - temp_name = "ctx-{}".format(roi_name.decode("utf-8")) + + # If the cortex label file does not exist, raise an error, otherwise load the cortex label + if os.path.isfile(label_file): + cortex_label = nib.freesurfer.read_label(label_file) + else: + raise ValueError( + "Cortex label file not found. Annotation, surface and cortex label files are mandatory to correct the parcellation." 
+            )
+
+        vert_lab = self.codes
+        vert_lab[vert_lab == -1] = 0
+
+        reg_ctable = self.regtable
+        reg_names = self.regnames
+
+        ctx_lab = vert_lab[cortex_label].astype(
+            int
+        )  # Vertices from the cortex label file that have a label in the annotation file
+
+        bool_bound = vert_lab[faces] != 0
+
+        # Boolean mask for the faces that have at least one labeled vertex and at least one unlabeled vertex (faces crossing the boundary of the parcellation)
+        bool_a = np.sum(bool_bound, axis=1) < 3
+        bool_b = np.sum(bool_bound, axis=1) > 0
+        bool_bound = bool_a & bool_b
+
+        faces_bound = faces[bool_bound, :]
+        bound_vert = np.ndarray.flatten(faces_bound)
+
+        vert_lab_bound = vert_lab[bound_vert]
+
+        # Delete from the array bound_vert the vertices whose label is already different from 0
+        bound_vert = np.delete(bound_vert, np.where(vert_lab_bound != 0)[0])
+        bound_vert = np.unique(bound_vert)
+
+        # Detect which vertices from bound_vert are in the cortex_label array
+        bound_vert = bound_vert[np.isin(bound_vert, cortex_label)]
+
+        bound_vert_orig = np.zeros(len(bound_vert))
+        # Fill the boundary vertices in a while loop
+        # The loop ends when the array bound_vert is empty or when bound_vert is equal to bound_vert_orig
+
+        # Detect if the array bound_vert is equal to bound_vert_orig
+        bound = np.array_equal(bound_vert, bound_vert_orig)
+        it_count = 0
+        while len(bound_vert) > 0:
+
+            if not bound:
+                # it_count = it_count + 1
+                # cad2print = "Iteration number: {} - Vertices to fill: {}".format(
+                #     it_count, len(bound_vert))
+                # print(cad2print)
+                # time.sleep(.5)
+                # print(LINE_UP, end=LINE_CLEAR)
+
+                bound_vert_orig = np.copy(bound_vert)
+                temp_Tri = np.zeros((len(bound_vert), 100))
+                for pos, i in enumerate(bound_vert):
+                    # Get the neighbors of the vertex
+                    neighbors = np.unique(faces[np.where(faces == i)[0], :])
+                    neighbors = np.delete(neighbors, np.where(neighbors == i)[0])
+                    temp_Tri[pos, 0 : len(neighbors)] = neighbors
+                temp_Tri = temp_Tri.astype(int)
+                index_zero = np.where(temp_Tri == 0)
+                labels_Tri = vert_lab[temp_Tri]
+                labels_Tri[index_zero] = 0
+
+                for pos, i in enumerate(bound_vert):
+
+                    # Get the labels of the neighbors
+                    labels = labels_Tri[pos, :]
+                    # Get the most frequent label different from 0
+                    most_frequent_label = np.bincount(labels[labels != 0]).argmax()
+
+                    # Assign the most frequent label to the vertex
+                    vert_lab[i] = most_frequent_label
+
+            ctx_lab = vert_lab[cortex_label].astype(
+                int
+            )  # Vertices from the cortex label file that have a label in the annotation file
+
+            bool_bound = vert_lab[faces] != 0
+
+            # Boolean mask for the faces that have at least one labeled vertex and at least one unlabeled vertex (faces crossing the boundary of the parcellation)
+            bool_a = np.sum(bool_bound, axis=1) < 3
+            bool_b = np.sum(bool_bound, axis=1) > 0
+            bool_bound = bool_a & bool_b
+
+            faces_bound = faces[bool_bound, :]
+            bound_vert = np.ndarray.flatten(faces_bound)
+
+            vert_lab_bound = vert_lab[bound_vert]
+
+            # Delete from the array bound_vert the vertices whose label is already different from 0
+            bound_vert = np.delete(bound_vert, np.where(vert_lab_bound != 0)[0])
+            bound_vert = np.unique(bound_vert)
+
+            # Detect which vertices from bound_vert are in the cortex_label array
+            bound_vert = bound_vert[np.isin(bound_vert, cortex_label)]
+
+            bound = np.array_equal(bound_vert, bound_vert_orig)
+
+        # Save the annotation file
+        if corr_annot is not None:
+            if os.path.isfile(corr_annot):
+                os.remove(corr_annot)
+
+            # Create the folder if it does not exist
+            os.makedirs(os.path.dirname(corr_annot), exist_ok=True)
+            nib.freesurfer.write_annot(corr_annot, vert_lab, reg_ctable, reg_names)
+        else:
+            nib.freesurfer.write_annot(self.annotfile, vert_lab, reg_ctable, reg_names)
+            corr_annot = self.annotfile
+
+        return corr_annot, vert_lab, reg_ctable, reg_names
+
+    def _annot2gcs(
+        self,
+        gcs_file: str = None,
+        freesurfer_dir: str = None,
+        fssubj_id: str = None,
+        hemi: str = None,
+    ):
+        """
+        Convert FreeSurfer annot files to gcs files
+        @params:
+            gcs_file       - Optional  : GCS filename. If not provided, it will be saved in the same folder as the annot file:
+            freesurfer_dir - Optional  : FreeSurfer directory. Default is the $SUBJECTS_DIR environment variable:
+            fssubj_id      - Required  : FreeSurfer subject id whose sphere.reg is used to train the classifier:
+            hemi           - Optional  : Hemisphere (lh or rh). If not provided, it will be extracted from the annot filename:
+        """
+
+        if gcs_file is None:
+            gcs_name = self.annotname.replace(".annot", ".gcs")
+
+            # Default to the folder containing the annot file
+            gcs_folder = self.annotfolder
+            gcs_file = os.path.join(gcs_folder, gcs_name)
+
+        else:
+            gcs_name = os.path.basename(gcs_file)
+            gcs_folder = os.path.dirname(gcs_file)
+
+        # Create the gcs folder if it does not exist
+        if not os.path.exists(gcs_folder):
+            os.makedirs(gcs_folder)
+
+        # Read the colors from the annot region table
+        reg_colors = self.regtable[:, 0:3]
+
+        # Create the lookup table for the regions
+        luttable = []
+        for roi_pos, roi_name in enumerate(self.regnames):
+            luttable.append(
                 "{:<4} {:<40} {:>3} {:>3} {:>3} {:>3}".format(
                     roi_pos + 1,
-                    temp_name,
-                    colors[roi_pos, 0],
-                    colors[roi_pos, 1],
-                    colors[roi_pos, 2],
+                    roi_name,
+                    reg_colors[roi_pos, 0],
+                    reg_colors[roi_pos, 1],
+                    reg_colors[roi_pos, 2],
                     0,
                 )
             )
+
+        # Set the FreeSurfer directory
+        if freesurfer_dir is not None:
+            os.environ["SUBJECTS_DIR"] = freesurfer_dir
+        else:
+            if "SUBJECTS_DIR" not in os.environ:
+                raise ValueError(
+                    "The FreeSurfer directory must be set in the environment variables or passed as an argument"
+                )
+            else:
+                freesurfer_dir = os.environ["SUBJECTS_DIR"]
+
+        # Set the FreeSurfer subject id
+        if fssubj_id is None:
+            raise ValueError("Please supply a valid subject ID.")
+
+        # If the freesurfer subject directory does not exist, raise an error
+        if not os.path.isdir(os.path.join(freesurfer_dir, fssubj_id)):
+            raise ValueError(
+                "The FreeSurfer subject directory for {} does not exist".format(fssubj_id)
+            )
+
+        if not os.path.isfile(os.path.join(freesurfer_dir, fssubj_id, "surf", "sphere.reg")):
+            raise ValueError(
+                "The FreeSurfer subject directory for {} does not contain the sphere.reg file".format(fssubj_id)
+            )
+
         # Save the lookup table for the left hemisphere
-        ctab_file = os.path.join(gcs_folder, annot_name + ".ctab")
+        ctab_file = os.path.join(gcs_folder, self.annotname + ".ctab")
         with open(ctab_file, "w") as colorLUT_f:
             colorLUT_f.write("\n".join(luttable))
 
-        # Create the gcs file
-        gcs_file = os.path.join(gcs_folder, annot_name + ".gcs")
+        # Detecting the hemisphere
+        if hemi is None:
+            hemi = self.hemi
+            if hemi is None:
+                raise ValueError(
+                    "The hemisphere could not be extracted from the annot filename. "
+                    "Please provide it as an argument"
+                )
 
         cmd_cont = [
             "mris_ca_train",
             "-n",
             "2",
             "-t",
             ctab_file,
             hemi,
             "sphere.reg",
-            annot_file,
-            "fsaverage",
+            self.annotfile,
+            fssubj_id,
             gcs_file,
         ]
+
+        echo_var = " ".join(cmd_cont)
+        print(echo_var)
         subprocess.run(
             cmd_cont, stdout=subprocess.PIPE, universal_newlines=True
         )  # Running container command
 
-        out_gcs.append(gcs_file)
+        # Delete the ctab file
+        os.remove(ctab_file)
+
+        return gcs_name
+
+
+def _create_fsaverage_links(
+    fssubj_dir: str, fsavg_dir: str = None, refsubj_name: str = None
+):
+    """
+    Create the links to the fsaverage folder
+    @params:
+        fssubj_dir   - Required  : FreeSurfer subjects directory. It does not have to match the $SUBJECTS_DIR environment variable:
+        fsavg_dir    - Optional  : FreeSurfer fsaverage directory. If not provided, it will be extracted from the $FREESURFER_HOME environment variable:
+        refsubj_name - Optional  : Reference subject name. Default is None:
+
+    Returns
+    -------
+    link_folder: str
+        Path to the linked folder
+
+    """
+
+    # Verify if the FreeSurfer directory exists
+    if not os.path.isdir(fssubj_dir):
+        raise ValueError("The selected FreeSurfer directory does not exist")
+
+    # Creating and verifying the freesurfer directory for the reference name
+    if fsavg_dir is None:
+        if refsubj_name is None:
+            fsavg_dir = os.path.join(
+                os.environ["FREESURFER_HOME"], "subjects", "fsaverage"
+            )
+        else:
+            fsavg_dir = os.path.join(
+                os.environ["FREESURFER_HOME"], "subjects", refsubj_name
+            )
+    else:
+        if fsavg_dir.endswith(os.path.sep):
+            fsavg_dir = fsavg_dir[0:-1]
+
+        if refsubj_name is not None:
+            if not fsavg_dir.endswith(refsubj_name):
+                fsavg_dir = os.path.join(fsavg_dir, refsubj_name)
+
+    if not os.path.isdir(fsavg_dir):
+        raise ValueError("The selected fsaverage directory does not exist")
+
+    # Taking into account that the fsaverage folder might not be named fsaverage
+    refsubj_name = os.path.basename(fsavg_dir)
 
-    return out_gcs
+    # Create the link to the fsaverage folder
+    link_folder = os.path.join(fssubj_dir, refsubj_name)
 
-def read_fscolorlut(lutFile: str):
+    if not os.path.isdir(link_folder):
+        process = subprocess.run(
+            ["ln", "-s", fsavg_dir, fssubj_dir],
+            stdout=subprocess.PIPE,
+            universal_newlines=True,
+        )
+
+    return link_folder
+
+
+def _remove_fsaverage_links(linkavg_folder: str):
+    """
+    Remove the links to the average folder
+    @params:
+        linkavg_folder - Required  : FreeSurfer average directory.
+                                     It does not have to match the $SUBJECTS_DIR environment variable.
+ If it is a link and do not match with the original fsaverage folder + then it will be removed: + """ + + # FreeSurfer subjects directory + fssubj_dir_orig = os.path.join( + os.environ["FREESURFER_HOME"], "subjects", "fsaverage" + ) + + # if linkavg_folder is a link then remove it + if ( + os.path.islink(linkavg_folder) + and os.path.realpath(linkavg_folder) != fssubj_dir_orig + ): + os.remove(linkavg_folder) + + +def _read_fscolorlut(lutFile: str): """ Reading freesurfer lut file @params: @@ -118,6 +489,7 @@ def read_fscolorlut(lutFile: str): return st_codes, st_names, st_colors + def _convertluts_freesurfer2fsl(freelut: str, fsllut: str): """ Convert FreeSurfer lut file to FSL lut file @@ -127,7 +499,7 @@ def _convertluts_freesurfer2fsl(freelut: str, fsllut: str): """ # Reading FreeSurfer color lut - st_codes_lut, st_names_lut, st_colors_lut = read_fscolorlut(freelut) + st_codes_lut, st_names_lut, st_colors_lut = _read_fscolorlut(freelut) lut_lines = [] for roi_pos, st_code in enumerate(st_codes_lut): @@ -144,4 +516,3 @@ def _convertluts_freesurfer2fsl(freelut: str, fsllut: str): with open(fsllut, "w") as colorLUT_f: colorLUT_f.write("\n".join(lut_lines)) - diff --git a/clabtoolkit/misctools.py b/clabtoolkit/misctools.py index 69802d9..5f2c96f 100755 --- a/clabtoolkit/misctools.py +++ b/clabtoolkit/misctools.py @@ -1,4 +1,3 @@ - # Print iterations progress def _printprogressbar( iteration, @@ -30,7 +29,8 @@ def _printprogressbar( if iteration == total: print() -def rgb2hex(r, g, b): + +def _rgb2hex(r, g, b): """ Function to convert rgb to hex @@ -47,13 +47,13 @@ def rgb2hex(r, g, b): ------- hexcode: str Hexadecimal code for the color - + """ - + return "#{:02x}{:02x}{:02x}".format(r, g, b) -def hex2rgb(hexcode): +def _hex2rgb(hexcode): """ Function to convert hex to rgb @@ -66,7 +66,7 @@ def hex2rgb(hexcode): ------- tuple Tuple with the rgb values - + """ - return tuple(map(ord, hexcode[1:].decode('hex'))) \ No newline at end of file + return tuple(map(ord, hexcode[1:].decode("hex"))) diff --git a/clabtoolkit/parcellationtools.py b/clabtoolkit/parcellationtools.py index 13794b6..0c2ba88 100644 --- a/clabtoolkit/parcellationtools.py +++ b/clabtoolkit/parcellationtools.py @@ -22,32 +22,28 @@ def _parc_tsv_table(codes, names, colors, tsv_filename): ------- tsv_df: pandas DataFrame DataFrame with the tsv table - + """ - + # Table for parcellation # 1. Converting colors to hexidecimal string seg_hexcol = [] nrows, ncols = colors.shape for i in np.arange(0, nrows): - seg_hexcol.append(cltmisc.rgb2hex(colors[i, 0], colors[i, 1], colors[i, 2])) + seg_hexcol.append(cltmisc._rgb2hex(colors[i, 0], colors[i, 1], colors[i, 2])) tsv_df = pd.DataFrame( - { - 'index': np.asarray(codes), - 'name': names, - 'color': seg_hexcol - } + {"index": np.asarray(codes), "name": names, "color": seg_hexcol} ) # print(bids_df) # Save the tsv table - with open(tsv_filename, 'w+') as tsv_file: - tsv_file.write(tsv_df.to_csv(sep='\t', index=False)) - + with open(tsv_filename, "w+") as tsv_file: + tsv_file.write(tsv_df.to_csv(sep="\t", index=False)) + return tsv_df -def tissue_seg_table(tsv_filename): +def _tissue_seg_table(tsv_filename): """ Function to create a tsv table for tissue segmentation @@ -60,32 +56,34 @@ def tissue_seg_table(tsv_filename): ------- seg_df: pandas DataFrame DataFrame with the tsv table - + """ # Table for tissue segmentation # 1. 
Default values for tissues segmentation table seg_rgbcol = np.array([[172, 0, 0], [0, 153, 76], [0, 102, 204]]) seg_codes = np.array([1, 2, 3]) - seg_names = ['cerebro_spinal_fluid', 'gray_matter', 'white_matter'] - seg_acron = ['CSF', 'GM', 'WM'] + seg_names = ["cerebro_spinal_fluid", "gray_matter", "white_matter"] + seg_acron = ["CSF", "GM", "WM"] # 2. Converting colors to hexidecimal string seg_hexcol = [] nrows, ncols = seg_rgbcol.shape for i in np.arange(0, nrows): - seg_hexcol.append(cltmisc.rgb2hex(seg_rgbcol[i, 0], seg_rgbcol[i, 1], seg_rgbcol[i, 2])) + seg_hexcol.append( + cltmisc._rgb2hex(seg_rgbcol[i, 0], seg_rgbcol[i, 1], seg_rgbcol[i, 2]) + ) seg_df = pd.DataFrame( { - 'index': seg_codes, - 'name': seg_names, - 'abbreviation': seg_acron, - 'color': seg_hexcol + "index": seg_codes, + "name": seg_names, + "abbreviation": seg_acron, + "color": seg_hexcol, } ) # Save the tsv table - with open(tsv_filename, 'w+') as tsv_file: - tsv_file.write(seg_df.to_csv(sep='\t', index=False)) - - return seg_df \ No newline at end of file + with open(tsv_filename, "w+") as tsv_file: + tsv_file.write(seg_df.to_csv(sep="\t", index=False)) + + return seg_df diff --git a/clabtoolkit/surfacetools.py b/clabtoolkit/surfacetools.py index cdb3e14..e69de29 100755 --- a/clabtoolkit/surfacetools.py +++ b/clabtoolkit/surfacetools.py @@ -1,724 +0,0 @@ -import argparse -import os -import numpy as np -import nibabel as nib -from datetime import datetime -from glob import glob -from pathlib import Path -from shutil import copyfile -import subprocess -from skimage import measure -import numpy as np -import pandas as pd -import subprocess - - -def _annot2gcs( - annot_folder: str, - gcs_folder: str, - freesurfer_dir: str = "/opt/freesurfer/subjects/", -): - """ - Convert FreeSurfer annot files to gcs files - @params: - annot_folder - Required : Folder containing the annot files: - gcs_folder - Required : Output folder: - freesurfer_dir - Optional : FreeSurfer directory: - """ - - # Create te gcs folder if it does not exist - if not os.path.exists(gcs_folder): - os.makedirs(gcs_folder) - - os.environ["SUBJECTS_DIR"] = freesurfer_dir - - any_annots = glob(os.path.join(annot_folder, "*.annot")) - out_gcs = [] - for annot_file in any_annots: - print(annot_file) - annot_name = os.path.basename(annot_file) - annot_name = annot_name.replace(".annot", "") - hemi = annot_name[0:2] - - print(annot_name) - # Read the annot file using nibabel - sdata = nib.freesurfer.io.read_annot(annot_file) - - # Read the colors from lh_annot - codes = sdata[0] - colors = sdata[1][1:, 0:3] - stnames = sdata[2][1:] - - # Create the lookup table for the right hemisphere - luttable = [ - "{:<4} {:<40} {:>3} {:>3} {:>3} {:>3}".format( - 0, "ctx-unknown", 250, 250, 250, 0 - ) - ] - for roi_pos, roi_name in enumerate(stnames): - temp_name = "ctx-{}".format(roi_name.decode("utf-8")) - luttable.append( - "{:<4} {:<40} {:>3} {:>3} {:>3} {:>3}".format( - roi_pos + 1, - temp_name, - colors[roi_pos, 0], - colors[roi_pos, 1], - colors[roi_pos, 2], - 0, - ) - ) - - # Save the lookup table for the left hemisphere - ctab_file = os.path.join(gcs_folder, annot_name + ".ctab") - with open(ctab_file, "w") as colorLUT_f: - colorLUT_f.write("\n".join(luttable)) - - # Create the gcs file - gcs_file = os.path.join(gcs_folder, annot_name + ".gcs") - - cmd_cont = [ - "mris_ca_train", - "-n", - "2", - "-t", - ctab_file, - hemi, - "sphere.reg", - annot_file, - "fsaverage", - gcs_file, - ] - subprocess.run( - cmd_cont, stdout=subprocess.PIPE, universal_newlines=True - ) 
# Running container command - - out_gcs.append(gcs_file) - - return out_gcs - - -# This function removes the B0s volumes located at the end of the diffusion 4D volume. -def _remove_empty_dwi_Volume(dwifile): - """ - Remove the B0s volumes located at the end of the diffusion 4D volume. - @params: - dwifile - Required : Diffusion 4D volume: - """ - - # Creating the name for the json file - pth = os.path.dirname(dwifile) - fname = os.path.basename(dwifile) - if fname.endswith(".nii.gz"): - flname = fname[0:-7] - elif fname.endswith(".nii"): - flname = fname[0:-4] - - # Creating filenames - bvecfile = os.path.join(pth, flname + ".bvec") - bvalfile = os.path.join(pth, flname + ".bval") - jsonfile = os.path.join(pth, flname + ".json") - - # Loading bvalues - if os.path.exists(bvalfile): - bvals = np.loadtxt(bvalfile, dtype=float, max_rows=5).astype(int) - - tempBools = list(bvals < 10) - if tempBools[-1]: - if os.path.exists(bvecfile): - bvecs = np.loadtxt(bvecfile, dtype=float) - - # Reading the image - mapI = nib.load(dwifile) - diffData = mapI.get_fdata() - affine = mapI.affine - - # Detecting the clusters of B0s - lb_bvals = measure.label(bvals, 2) - - lab2rem = lb_bvals[-1] - vols2rem = np.where(lb_bvals == lab2rem)[0] - vols2keep = np.where(lb_bvals != lab2rem)[0] - - # Removing the volumes - array_data = np.delete(diffData, vols2rem, 3) - - # Temporal image and diffusion scheme - tmp_dwi_file = os.path.join(pth, flname + ".nii.gz") - array_img = nib.Nifti1Image(array_data, affine) - nib.save(array_img, tmp_dwi_file) - - select_bvecs = bvecs[:, vols2keep] - select_bvals = bvals[vols2keep] - select_bvals.transpose() - - # Saving new bvecs and new bvals - tmp_bvecs_file = os.path.join(pth, flname + ".bvec") - np.savetxt(tmp_bvecs_file, select_bvecs, fmt="%f") - - tmp_bvals_file = os.path.join(pth, flname + ".bval") - np.savetxt(tmp_bvals_file, select_bvals, newline=" ", fmt="%d") - - return dwifile, bvecfile, bvalfile, jsonfile - - -# Print iterations progress -def _printprogressbar( - iteration, - total, - prefix="", - suffix="", - decimals=1, - length=100, - fill="█", - printend="\r", -): - """ - Call in a loop to create terminal progress bar - @params: - iteration - Required : current iteration (Int) - total - Required : total iterations (Int) - prefix - Optional : prefix string (Str) - suffix - Optional : suffix string (Str) - decimals - Optional : positive number of decimals in percent complete (Int) - length - Optional : character length of bar (Int) - fill - Optional : bar fill character (Str) - printend - Optional : end character (e.g. "\r", "\r\n") (Str) - """ - percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) - filledlength = int(length * iteration // total) - bar = fill * filledlength + "-" * (length - filledlength) - print(f"\r{prefix} |{bar}| {percent}% {suffix}", end=printend) - # Print New Line on Complete - if iteration == total: - print() - - -def _uncompress_dicom_session(dic_dir: str): - """ - Uncompress session folders - @params: - dic_dir - Required : Directory containing the subjects. 
It assumes an organization in: - //(Str) - """ - - # Listing the subject ids inside the dicom folder - my_list = os.listdir(dic_dir) - subj_ids = [] - for it in my_list: - if "sub-" in it: - subj_ids.append(it) - subj_ids.sort() - - # Failed sessions - fail_sess = [] - - # Loop around all the subjects - nsubj = len(subj_ids) - for i, subj_id in enumerate(subj_ids): # Loop along the IDs - subj_dir = os.path.join(dic_dir, subj_id) - - _printprogressbar( - i + 1, - nsubj, - "Processing subject " - + subj_id - + ": " - + "(" - + str(i + 1) - + "/" - + str(nsubj) - + ")", - ) - - # Loop along all the sessions inside the subject directory - for ses_tar in glob( - subj_dir + os.path.sep + "*.tar.gz" - ): # Loop along the session - # print('SubjectId: ' + subjId + ' ======> Session: ' + sesId) - # Compress only if it is a folder - if os.path.isfile(ses_tar): - try: - # Compressing the folder - subprocess.run( - ["tar", "xzf", ses_tar, "-C", subj_dir], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - # Removing the uncompressed dicom folder - # subprocess.run( - # ['rm', '-r', ses_tar], stdout=subprocess.PIPE, universal_newlines=True) - - except: - fail_sess.append(ses_tar) - if fail_sess: - print("THE PROCESS FAILED TO UNCOMPRESS THE FOLLOWING TAR FILES:") - for i in fail_sess: - print(i) - print(" ") - print("End of the uncompression process.") - - -def _uncompress_dicom_session_tosolve(dic_dir: str): - """ - Uncompress session folders - @params: - dic_dir - Required : Directory containing the subjects. It assumes an organization in: - //(Str) - """ - - # Listing the subject ids inside the dicom folder - my_list = os.listdir(dic_dir) - subj_ids = [] - for it in my_list: - if "sub-" in it: - subj_ids.append(it) - subj_ids.sort() - - # Failed sessions - fail_sess = [] - - # Loop around all the subjects - nsubj = len(subj_ids) - for i, subj_id in enumerate(subj_ids): # Loop along the IDs - subj_dir = os.path.join(dic_dir, subj_id) - - _printprogressbar( - i + 1, - nsubj, - "Processing subject " - + subj_id - + ": " - + "(" - + str(i + 1) - + "/" - + str(nsubj) - + ")", - ) - - # Loop along all the sessions inside the subject directory - for ses_tar in glob( - subj_dir + os.path.sep + "*.tar.gz" - ): # Loop along the session - # print('SubjectId: ' + subjId + ' ======> Session: ' + sesId) - # Compress only if it is a folder - if os.path.isfile(ses_tar): - try: - # Compressing the folder - subprocess.run( - ["tar", "xzf", ses_tar, "-C", subj_dir], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - ses_id = os.path.basename(ses_tar)[:-7] - outDir = os.path.join(subj_dir, ses_id) - os.makedirs(outDir) - t = os.path.join( - subj_dir, - "media", - "yaleman", - "Database", - "IMAGING-PROJECTS", - "ENDOCRINOLOGY_PROJECT", - "Dicom", - subj_id, - ses_id, - ) - cmd = "mv " + t + os.path.sep + "* " + outDir - os.system(cmd) - os.system("rm -r " + subj_dir + os.path.sep + "media") - - # Removing the uncompressed dicom folder - subprocess.run( - ["rm", "-r", ses_tar], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - except: - fail_sess.append(ses_tar) - if fail_sess: - print("THE PROCESS FAILED TO UNCOMPRESS THE FOLLOWING TAR FILES:") - for i in fail_sess: - print(i) - print(" ") - print("End of the uncompression process.") - - -def _compress_dicom_session(dic_dir: str): - """ - Compress session folders - @params: - dic_dir - Required : Directory containing the subjects. 
It assumes an organization in: - //(Str) - """ - - # Listing the subject ids inside the dicom folder - my_list = os.listdir(dic_dir) - subj_ids = [] - for it in my_list: - if "sub-" in it: - subj_ids.append(it) - subj_ids.sort() - - # Failed sessions - fail_sess = [] - - # Loop around all the subjects - nsubj = len(subj_ids) - for i, subj_id in enumerate(subj_ids): # Loop along the IDs - subj_dir = os.path.join(dic_dir, subj_id) - - _printprogressbar( - i + 1, - nsubj, - "Processing subject " - + subj_id - + ": " - + "(" - + str(i + 1) - + "/" - + str(nsubj) - + ")", - ) - - # Loop along all the sessions inside the subject directory - for ses_id in os.listdir(subj_dir): # Loop along the session - ses_dir = os.path.join(subj_dir, ses_id) - # print('SubjectId: ' + subjId + ' ======> Session: ' + sesId) - # Compress only if it is a folder - if os.path.isdir(ses_dir): - tar_filename = ses_dir + ".tar.gz" - try: - # Compressing the folder - subprocess.run( - ["tar", "-C", subj_dir, "-czvf", tar_filename, ses_id], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - # Removing the uncompressed dicom folder - subprocess.run( - ["rm", "-r", ses_dir], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - except: - fail_sess.append(ses_dir) - if fail_sess: - print("THE PROCESS FAILED TO COMPRESS THE FOLLOWING SESSIONS:") - for i in fail_sess: - print(i) - print(" ") - print("End of the compression process.") - -# This function copies the BIDs folder and its derivatives for e given subjects to a new location -def _copy_bids_folder( - bids_dir: str, out_dir: str, fold2copy: list = ["anat"], subjs2copy: str = None, deriv_dir: str = None, -include_derivatives: bool = False): - """ - Copy full bids folders - @params: - bids_dir - Required : BIDs dataset directory: - out_dir - Required : Output directory: - fold2copy - Optional : List of folders to copy: default = ['anat'] - subjs2copy - Optional : List of subjects to copy: - deriv_dir - Optional : Derivatives directory: default = None - include_derivatives - Optional : Include derivatives folder: default = False - """ - - # Listing the subject ids inside the dicom folder - if subjs2copy is None: - my_list = os.listdir(bids_dir) - subj_ids = [] - for it in my_list: - if "sub-" in it: - subj_ids.append(it) - subj_ids.sort() - else: - subj_ids = subjs2copy - - # Selecting the derivatives folder - if include_derivatives: - if deriv_dir is None: - deriv_dir = os.path.join(bids_dir, "derivatives") - - if not os.path.isdir(deriv_dir): - # Lunch a warning message if the derivatives folder does not exist - print("WARNING: The derivatives folder does not exist.") - print("WARNING: The derivatives folder will not be copied.") - include_derivatives = False - - # Selecting all the derivatives folders - der_pipe_folders = [] - directories = os.listdir(deriv_dir) - der_pipe_folders = [] - for directory in directories: - pipe_dir = os.path.join(deriv_dir, directory) - if not directory.startswith(".") and os.path.isdir(pipe_dir): - der_pipe_folders.append(pipe_dir) - - # Failed sessions and derivatives - fail_sess = [] - fail_deriv = [] - - # Loop around all the subjects - nsubj = len(subj_ids) - for i, subj_id in enumerate(subj_ids): # Loop along the IDs - subj_dir = os.path.join(bids_dir, subj_id) - out_subj_dir = os.path.join(out_dir, subj_id) - - _printprogressbar( - i + 1, - nsubj, - "Processing subject " - + subj_id - + ": " - + "(" - + str(i + 1) - + "/" - + str(nsubj) - + ")", - ) - - # Loop along all the sessions inside the subject directory - for 
ses_id in os.listdir(subj_dir): # Loop along the session - ses_dir = os.path.join(subj_dir, ses_id) - out_ses_dir = os.path.join(out_subj_dir, ses_id) - - # print('Copying SubjectId: ' + subjId + ' ======> Session: ' + sesId) - - if fold2copy[0] == "all": - directories = os.listdir(ses_dir) - fold2copy = [] - for directory in directories: - if not directory.startswith(".") and os.path.isdir(os.path.join(ses_dir, directory)): - print(directory) - fold2copy.append(directory) - - for fc in fold2copy: - # Copying the anat folder - if os.path.isdir(ses_dir): - fold_to_copy = os.path.join(ses_dir, fc) - - try: - # Creating destination directory using make directory - dest_dir = os.path.join(out_ses_dir, fc) - os.makedirs(dest_dir, exist_ok=True) - - shutil.copytree(fold_to_copy, dest_dir, dirs_exist_ok=True) - - except: - fail_sess.append(fold_to_copy) - - if include_derivatives: - # Copying the derivatives folder - - for pipe_dir in der_pipe_folders: - if os.path.isdir(pipe_dir): - - out_pipe_dir = os.path.join(out_dir, "derivatives", os.path.basename(pipe_dir)) - - pipe_indiv_subj_in = os.path.join(pipe_dir, subj_id, ses_id) - pipe_indiv_subj_out = os.path.join(out_pipe_dir, subj_id, ses_id) - - if os.path.isdir(pipe_indiv_subj_in): - try: - # Creating destination directory using make directory - os.makedirs(pipe_indiv_subj_out, exist_ok=True) - - # Copying the folder - shutil.copytree(pipe_indiv_subj_in, pipe_indiv_subj_out, dirs_exist_ok=True) - - except: - fail_deriv.append(pipe_indiv_subj_in) - - # Print the failed sessions and derivatives - print(" ") - if fail_sess: - print("THE PROCESS FAILED COPYING THE FOLLOWING SESSIONS:") - for i in fail_sess: - print(i) - print(" ") - - if fail_deriv: - print("THE PROCESS FAILED COPYING THE FOLLOWING DERIVATIVES:") - for i in fail_deriv: - print(i) - print(" ") - - print("End of copying the files.") - - - - -def _detect_dwi_nvols(bids_dir: str, out_dir: str): - """ - Copy full anat folders - @params: - bids_dir - Required : BIDs dataset directory: - out_dir - Required : Output directory: - """ - - # Listing the subject ids inside the dicom folder - my_list = os.listdir(bids_dir) - subj_ids = [] - for it in my_list: - if "sub-" in it: - subj_ids.append(it) - subj_ids.sort() - - # Failed sessions - fail_sess = [] - - # Loop around all the subjects - nsubj = len(subj_ids) - for i, subj_id in enumerate(subj_ids): # Loop along the IDs - subj_dir = os.path.join(bids_dir, subj_id) - out_subj_dir = os.path.join(out_dir, subj_id) - - _printprogressbar( - i + 1, - nsubj, - "Processing subject " - + subj_id - + ": " - + "(" - + str(i + 1) - + "/" - + str(nsubj) - + ")", - ) - - # Loop along all the sessions inside the subject directory - for ses_id in os.listdir(subj_dir): # Loop along the session - ses_dir = os.path.join(subj_dir, ses_id) - out_ses_dir = os.path.join(out_subj_dir, ses_id) - - # print('SubjectId: ' + subjId + ' ======> Session: ' + sesId) - # Copying the anat folder - if os.path.isdir(ses_dir): - anat_fold = os.path.join(ses_dir, "dwi") - - try: - # Creating destination directory - subprocess.run( - ["mkdir", "-p", out_ses_dir], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - # Copying the folder - subprocess.run( - ["cp", "-r", anat_fold, out_ses_dir], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - - except: - fail_sess.append(anat_fold) - if fail_sess: - print("THE PROCESS FAILED TO COMPRESS THE FOLLOWING SESSIONS:") - for i in fail_sess: - print(i) - print(" ") - print("End of the compression process.") - - -def 
read_fscolorlut(lutFile: str): - """ - Reading freesurfer lut file - @params: - lutFile - Required : FreeSurfer color lut: - """ - # Readind a color LUT file - fid = open(lutFile) - LUT = fid.readlines() - fid.close() - - # Make dictionary of labels - LUT = [row.split() for row in LUT] - st_names = [] - st_codes = [] - cont = 0 - for row in LUT: - if ( - len(row) > 1 and row[0][0] != "#" and row[0][0] != "\\\\" - ): # Get rid of the comments - st_codes.append(int(row[0])) - st_names.append(row[1]) - if cont == 0: - st_colors = np.array([[int(row[2]), int(row[3]), int(row[4])]]) - else: - ctemp = np.array([[int(row[2]), int(row[3]), int(row[4])]]) - st_colors = np.append(st_colors, ctemp, axis=0) - cont = cont + 1 - - return st_codes, st_names, st_colors - - -def _convertluts_freesurfer2fsl(freelut: str, fsllut: str): - """ - Convert FreeSurfer lut file to FSL lut file - @params: - freelut - Required : FreeSurfer color lut: - fsllut - Required : FSL color lut: - """ - - # Reading FreeSurfer color lut - st_codes_lut, st_names_lut, st_colors_lut = read_fscolorlut(freelut) - - lut_lines = [] - for roi_pos, st_code in enumerate(st_codes_lut): - st_name = st_names_lut[roi_pos] - lut_lines.append( - "{:<4} {:>3.5f} {:>3.5f} {:>3.5f} {:<40} ".format( - st_code, - st_colors_lut[roi_pos, 0] / 255, - st_colors_lut[roi_pos, 1] / 255, - st_colors_lut[roi_pos, 2] / 255, - st_name, - ) - ) - - with open(fsllut, "w") as colorLUT_f: - colorLUT_f.write("\n".join(lut_lines)) - - -# Load csv table and save the column participant_id as a list called subj_ids -# csv_file = '/media/HPCdata/Jagruti_DTI_DSI/participants_DSI_DTI.csv' -# import pandas as pd -# df = pd.read_csv(csv_file) -# subj_ids = df['participant_id'].tolist() - -# # Add 'sub-' to the subject ids -# subj_ids = ['sub-' + i for i in subj_ids] - - -# _copy_bids_folder('/media/HPCdata/Mindfulness/', '/media/yaleman/HagmannHDD/Test/',["anat", "dwi"], ["sub-S001"], include_derivatives=True, deriv_dir='/media/HPCdata/Mindfulness/derivatives') -# _uncompress_dicom_session('/media/yaleman/Database/IMAGING-PROJECTS/Dicom') - -# _compress_dicom_session('/media/COSAS/Yasser/Work2Do/ReconVertDatabase/Dicom') - -# atdir = '/media/COSAS/Yasser/Work2Do/ReconVertDatabase/derivatives/chimera-atlases/sub-CHUVA001/ses-V2/anat' -# freelut = os.path.join(atdir, 'sub-CHUVA001_ses-V2_run-1_space-orig_atlas-chimeraBFIIHIFIF_desc-grow0mm_dseg.lut') -# fsllut = '/home/yaleman/BFIIHIFIF.lut' -# _convertluts_freesurfer2fsl(freelut, fsllut) -# -# freelut = os.path.join(atdir, 'sub-CHUVA001_ses-V2_run-1_space-orig_atlas-chimeraHFIIIIFIF_desc-7p1grow0mm_dseg.lut') -# fsllut = '/home/yaleman/HFIIIIFIF.lut' -# _convertluts_freesurfer2fsl(freelut, fsllut) -# -# freelut = os.path.join(atdir, 'sub-CHUVA001_ses-V2_run-1_space-orig_atlas-chimeraLFMIIIFIF_desc-scale1grow0mm_dseg.lut') -# fsllut = '/home/yaleman/LFMIIIFIF.lut' -# _convertluts_freesurfer2fsl(freelut, fsllut) - -# dwifile = '/media/yaleman/Database/LENNARDS/BIDsDataset/sub-LEN0199/ses-20210804175649/dwi/sub-LEN0199_ses-20210804175649_run-1_acq-dtiNdir30_dwi.nii.gz' -# _remove_empty_dwi_Volume(dwifile) - -# freelut = '/media/COSAS/Yasser/Work2Do/ReconVertDatabase/derivatives/chimera-atlases/sub-CHUVA001/ses-V2/anat/sub-CHUVA001_ses-V2_run-1_space-orig_atlas-chimeraLFMIIIFIF_desc-scale5growwm_dseg.lut' -# fsllut = '/home/yaleman/test.fsllut' - -# _uncompress_dicom_session_tosolve('/media/yaleman/Database/IMAGING-PROJECTS/test/Dicom') diff --git a/docs/conf.py b/docs/conf.py index 2353eb8..02f8540 100755 --- a/docs/conf.py 
+++ b/docs/conf.py @@ -19,7 +19,8 @@ # import os import sys -sys.path.insert(0, os.path.abspath('..')) + +sys.path.insert(0, os.path.abspath("..")) import clabtoolkit @@ -31,22 +32,22 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] +extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Connectomics Lab Toolkit' +project = "Connectomics Lab Toolkit" copyright = "2024, Yasser Alemán-Gómez" author = "Yasser Alemán-Gómez" @@ -69,10 +70,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -83,7 +84,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the @@ -94,13 +95,13 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'clabtoolkitdoc' +htmlhelp_basename = "clabtoolkitdoc" # -- Options for LaTeX output ------------------------------------------ @@ -109,15 +110,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -127,9 +125,13 @@ # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). latex_documents = [ - (master_doc, 'clabtoolkit.tex', - 'Connectomics Lab Toolkit Documentation', - 'Yasser Alemán-Gómez', 'manual'), + ( + master_doc, + "clabtoolkit.tex", + "Connectomics Lab Toolkit Documentation", + "Yasser Alemán-Gómez", + "manual", + ), ] @@ -138,9 +140,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - (master_doc, 'clabtoolkit', - 'Connectomics Lab Toolkit Documentation', - [author], 1) + (master_doc, "clabtoolkit", "Connectomics Lab Toolkit Documentation", [author], 1) ] @@ -150,13 +150,13 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'clabtoolkit', - 'Connectomics Lab Toolkit Documentation', - author, - 'clabtoolkit', - 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "clabtoolkit", + "Connectomics Lab Toolkit Documentation", + author, + "clabtoolkit", + "One line description of project.", + "Miscellaneous", + ), ] - - - diff --git a/setup.cfg b/setup.cfg index d81c55c..74c42a0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.0.3 +current_version = 0.1.0 commit = True tag = True diff --git a/setup.py b/setup.py index 552c355..60aa20c 100644 --- a/setup.py +++ b/setup.py @@ -4,41 +4,41 @@ from setuptools import setup, find_packages -with open('README.rst') as readme_file: +with open("README.rst") as readme_file: readme = readme_file.read() -with open('HISTORY.rst') as history_file: +with open("HISTORY.rst") as history_file: history = history_file.read() -requirements = [ ] +requirements = [] -test_requirements = [ ] +test_requirements = [] setup( author="Yasser Alemán-Gómez", - author_email='yasseraleman@protonmail.com', - python_requires='>=3.6', + author_email="yasseraleman@protonmail.com", + python_requires=">=3.6", classifiers=[ - 'Development Status :: 2 - Pre-Alpha', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", ], description="This package contains a set of useful tools for image processing", install_requires=requirements, license="Apache Software License 2.0", - long_description=readme + '\n\n' + history, + long_description=readme + "\n\n" + history, include_package_data=True, - keywords='clabtoolkit', - name='clabtoolkit', - packages=find_packages(include=['clabtoolkit', 'clabtoolkit.*']), - test_suite='tests', + keywords="clabtoolkit", + name="clabtoolkit", + packages=find_packages(include=["clabtoolkit", "clabtoolkit.*"]), + test_suite="tests", tests_require=test_requirements, - url='https://github.com/connectomicslab/clabtoolkit', - version='0.0.3', + url="https://github.com/connectomicslab/clabtoolkit", + version="0.1.0", zip_safe=False, )
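
Usage note: the new AnnotParcellation class replaces the old standalone _annot2gcs from surfacetools.py. A typical call sequence against a recon-all output would look like the sketch below; the subject ID and every path are hypothetical placeholders, and _annot2gcs additionally expects FreeSurfer's mris_ca_train to be available on the PATH.

from clabtoolkit.freesurfertools import AnnotParcellation

# Load an annotation; the hemisphere is detected from the "lh."/"rh." prefix.
parc = AnnotParcellation("/data/fs/sub-001/label/lh.aparc.annot")

# Refill unlabeled cortex vertices using the white surface and cortex label.
parc._fill_parcellation(
    label_file="/data/fs/sub-001/label/lh.cortex.label",
    surf_file="/data/fs/sub-001/surf/lh.white",
    corr_annot="/tmp/lh.aparc.corrected.annot",
)

# Train a .gcs classifier from the annotation (wraps mris_ca_train).
parc._annot2gcs(freesurfer_dir="/data/fs", fssubj_id="sub-001")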
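For reviewers who want to sanity-check the gap-filling loop in _fill_parcellation without a full FreeSurfer subject, here is a self-contained sketch of the same rule on a toy mesh. The fill_labels helper and the toy arrays are hypothetical, not part of the patch; they only mirror the core idea: an unlabeled cortex vertex on the parcellation boundary takes the most frequent non-zero label among its face neighbors, iterated until nothing changes.

import numpy as np

def fill_labels(faces, labels, cortex):
    # Majority-vote filling, as in _fill_parcellation: unlabeled cortex
    # vertices inherit the most frequent non-zero label among their mesh
    # neighbors, repeating until a fixed point is reached.
    labels = labels.copy()
    while True:
        changed = False
        for v in (v for v in cortex if labels[v] == 0):
            # Neighbors are the vertices sharing a face with v
            neigh = np.unique(faces[np.any(faces == v, axis=1)])
            neigh_labs = labels[neigh]
            neigh_labs = neigh_labs[neigh_labs != 0]
            if neigh_labs.size:
                labels[v] = np.bincount(neigh_labs).argmax()
                changed = True
        if not changed:
            return labels

# Toy example: a strip of 4 triangles over 6 vertices; vertices 2 and 3
# start unlabeled (0) and are filled from their neighbors.
faces = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]])
labels = np.array([1, 1, 0, 0, 2, 2])
print(fill_labels(faces, labels, cortex=np.arange(6)))
# -> [1 1 1 1 2 2]  (ties resolve toward the smaller label)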
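One caveat in clabtoolkit/misctools.py: _hex2rgb still ends with hexcode[1:].decode("hex"), a Python 2 idiom. On Python 3 (which setup.py requires via python_requires=">=3.6"), str has no decode method, so the function raises AttributeError on first use. A minimal Python 3 rewrite — a sketch, not what the patch currently ships — would be:

def _hex2rgb(hexcode: str):
    # Convert a '#rrggbb' string into an (r, g, b) tuple of ints.
    # Each channel is a two-character base-16 field after the leading '#'.
    return tuple(int(hexcode[i : i + 2], 16) for i in (1, 3, 5))

# Round-trips with _rgb2hex; matches the CSF color used in _tissue_seg_table.
assert _hex2rgb("#ac0000") == (172, 0, 0)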
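Finally, since this patch makes the LUT helpers private (_read_fscolorlut, _convertluts_freesurfer2fsl), the pair would be driven as sketched below, in the spirit of the commented-out calls kept in examples.py; both file paths are placeholders.

from clabtoolkit import freesurfertools as cltfree

# Parse a FreeSurfer color LUT into region codes, names and an Nx3 RGB array.
st_codes, st_names, st_colors = cltfree._read_fscolorlut(
    "/opt/freesurfer/FreeSurferColorLUT.txt"  # placeholder input path
)
print(st_codes[0], st_names[0], st_colors[0])

# Write the equivalent FSL-style LUT, with the colors rescaled to 0-1.
cltfree._convertluts_freesurfer2fsl(
    "/opt/freesurfer/FreeSurferColorLUT.txt",
    "/tmp/freesurfer_as_fsl.lut",  # placeholder output path
)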