From 9606cd0f414715d10d720979e2073cbe4a1b46b5 Mon Sep 17 00:00:00 2001 From: Yasser Aleman Gomez Date: Mon, 11 Mar 2024 00:16:43 +0100 Subject: [PATCH] Major changes in the class Parcellation --- HISTORY.rst | 26 +- clabtoolkit/__init__.py | 2 +- clabtoolkit/freesurfertools.py | 81 --- clabtoolkit/misctools.py | 390 +++++++++++++- clabtoolkit/parcellationtools.py | 841 ++++++++++++++++++++++++++++--- setup.cfg | 2 +- setup.py | 2 +- 7 files changed, 1169 insertions(+), 175 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index 529e9bc..1dad104 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,8 +1,22 @@ -======= -History -======= +.. :changelog: -0.0.3 (2024-03-07) ------------------- +Release History +--------------- +dev ++++ +- [First stable release in PyPI.] +0.1.0 (2024-03-07) ++++++++++++++++++++ -* First stable release on PyPI. +**Bugfixes and Improvements** +- Correcting problems in the exportation of the color lut files. +- Creating a new method to export the color lut files. +- Correction in the method to export the color lut files as TSV files. + + +0.2.0 (2024-03-10) ++++++++++++++++++++ +**Improvements** +- Development of different methods in the class Parcellation. +- Introduction of some filtering methods. +- Adding a method to add a new parcellation to the parcellation object. diff --git a/clabtoolkit/__init__.py b/clabtoolkit/__init__.py index 4a39905..8ded30b 100644 --- a/clabtoolkit/__init__.py +++ b/clabtoolkit/__init__.py @@ -2,4 +2,4 @@ __author__ = """Yasser Alemán-Gómez""" __email__ = "yasseraleman@protonmail.com" -__version__ = "0.1.0" +__version__ = "0.2.0" diff --git a/clabtoolkit/freesurfertools.py b/clabtoolkit/freesurfertools.py index 9273b98..e6fbe9a 100755 --- a/clabtoolkit/freesurfertools.py +++ b/clabtoolkit/freesurfertools.py @@ -66,33 +66,6 @@ def __init__(self, annot_file: str): self.regtable = reg_table self.regnames = reg_names - def _correct_names(self, prefix: str = None, sufix: str = None, lower: bool = False): - """ - Correcting region names - @params: - prefix - Optional : Add prefix to the region names: - sufix - Optional : Add sufix to the region names: - lower - Optional : Lower the region names. 
Default is False: - """ - - # Add prefix and sufix to the region names - if prefix is not None: - # If temp_name do not starts with ctx- then add it - self.regnames = [ - name if name.startswith(prefix) else prefix + "{}".format(name) - for name in self.regnames - ] - - if sufix is not None: - # If temp_name do not ends with - then add it - self.regnames = [ - name if name.endswith(sufix) else "{}".format(name) + sufix - for name in self.regnames - ] - - if lower: - self.regnames = [name.lower() for name in self.regnames] - def _save_annotation(self, out_file: str = None): """ Save the annotation file @@ -458,61 +431,7 @@ def _remove_fsaverage_links(linkavg_folder: str): os.remove(linkavg_folder) -def _read_fscolorlut(lutFile: str): - """ - Reading freesurfer lut file - @params: - lutFile - Required : FreeSurfer color lut: - """ - # Readind a color LUT file - fid = open(lutFile) - LUT = fid.readlines() - fid.close() - - # Make dictionary of labels - LUT = [row.split() for row in LUT] - st_names = [] - st_codes = [] - cont = 0 - for row in LUT: - if ( - len(row) > 1 and row[0][0] != "#" and row[0][0] != "\\\\" - ): # Get rid of the comments - st_codes.append(int(row[0])) - st_names.append(row[1]) - if cont == 0: - st_colors = np.array([[int(row[2]), int(row[3]), int(row[4])]]) - else: - ctemp = np.array([[int(row[2]), int(row[3]), int(row[4])]]) - st_colors = np.append(st_colors, ctemp, axis=0) - cont = cont + 1 - return st_codes, st_names, st_colors -def _convertluts_freesurfer2fsl(freelut: str, fsllut: str): - """ - Convert FreeSurfer lut file to FSL lut file - @params: - freelut - Required : FreeSurfer color lut: - fsllut - Required : FSL color lut: - """ - - # Reading FreeSurfer color lut - st_codes_lut, st_names_lut, st_colors_lut = _read_fscolorlut(freelut) - - lut_lines = [] - for roi_pos, st_code in enumerate(st_codes_lut): - st_name = st_names_lut[roi_pos] - lut_lines.append( - "{:<4} {:>3.5f} {:>3.5f} {:>3.5f} {:<40} ".format( - st_code, - st_colors_lut[roi_pos, 0] / 255, - st_colors_lut[roi_pos, 1] / 255, - st_colors_lut[roi_pos, 2] / 255, - st_name, - ) - ) - with open(fsllut, "w") as colorLUT_f: - colorLUT_f.write("\n".join(lut_lines)) diff --git a/clabtoolkit/misctools.py b/clabtoolkit/misctools.py index 5f2c96f..381f75e 100755 --- a/clabtoolkit/misctools.py +++ b/clabtoolkit/misctools.py @@ -1,3 +1,8 @@ +import numpy as np +from typing import Union +import shlex +import os + # Print iterations progress def _printprogressbar( iteration, @@ -30,7 +35,10 @@ def _printprogressbar( print() -def _rgb2hex(r, g, b): +def _rgb2hex(r:int, + g:int, + b:int): + """ Function to convert rgb to hex @@ -52,8 +60,38 @@ def _rgb2hex(r, g, b): return "#{:02x}{:02x}{:02x}".format(r, g, b) +def _multi_rgb2hex(colors: Union[list, np.ndarray]): + """ + Function to convert rgb to hex for an array of colors + + Parameters + ---------- + colors : list or numpy array + List of rgb colors + + Returns + ------- + hexcodes: list + List of hexadecimal codes for the colors -def _hex2rgb(hexcode): + """ + + # If all the values in the list are between 0 and 1, then the values are multiplied by 255 + colors = _readjust_colors(colors) + + hexcodes = [] + if isinstance(colors, list): + for color in colors: + hexcodes.append(_rgb2hex(color[0], color[1], color[2])) + + elif isinstance(colors, np.ndarray): + nrows, ncols = colors.shape + for i in np.arange(0, nrows): + hexcodes.append(_rgb2hex(colors[i, 0], colors[i, 1], colors[i, 2])) + + return hexcodes + +def _hex2rgb(hexcode: str): """ Function to convert hex 
to rgb @@ -68,5 +106,351 @@ def _hex2rgb(hexcode): Tuple with the rgb values """ + # Convert hexadecimal color code to RGB values + hexcode = hexcode.lstrip('#') + return tuple(int(hexcode[i:i+2], 16) for i in (0, 2, 4)) + +def _multi_hex2rgb(hexcodes: list): + """ + Function to convert hex to rgb for an array of colors + + Parameters + ---------- + hexcodes : list + List of hexadecimal codes for the colors + + Returns + ------- + rgb_list: np.array + Array of rgb values + + """ + + rgb_list = [_hex2rgb(hex_color) for hex_color in hexcodes] + return np.array(rgb_list) + +def _build_indexes(range_vector: list): + """ + Function to build the indexes from a range vector. The range vector can contain integers, tuples, lists or strings. + + For example: + range_vector = [1, (2, 5), [6, 7], "8-10", "11:13", "14:2:22"] + + In this example the tuple (2, 5) will be converted to [2, 3, 4, 5] + The list [6, 7] will be kept as it is + The string "8-10" will be converted to [8, 9, 10] + The string "11:13" will be converted to [11, 12, 13] + The string "14:2:22" will be converted to [14, 16, 18, 20, 22] + + All this values will be flattened and unique values will be returned. + In this case the output will be [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 18, 20, 22] + + Parameters + ---------- + range_vector : list + List of ranges + + Returns + ------- + indexes: list + List of indexes + + """ + + indexes = [] + for i in range_vector: + if isinstance(i, tuple): + + # Apend list from the minimum to the maximum value + indexes.append(list(range(i[0], i[1]+1))) + + elif isinstance(i, int): + # Append the value as an integer + indexes.append([i]) + + elif isinstance(i, list): + # Append the values in the values in the list + indexes.append(i) + + elif isinstance(i, str): + + # Find if the strin contains "-" or ":" + if "-" in i: + # Split the string by the "-" + i = i.split("-") + indexes.append(list(range(int(i[0]), int(i[1])+1))) + elif ":" in i: + # Split the string by the ":" + i = i.split(":") + if len(i) == 2: + indexes.append(list(range(int(i[0]), int(i[1])+1))) + elif len(i) == 3: + + # Append the values in the range between the minimum to the maximum value of the elements of the list with a step + indexes.append(list(range(int(i[0]), int(i[2])+1, int(i[1])))) + + else: + + try: + # Append the value as an integer + indexes.append([int(i)]) + except: + pass + + + indexes = [item for sublist in indexes for item in sublist] + + # Remove the elements with 0 + indexes = [x for x in indexes if x != 0] + + # Flatten the list and unique the values + indexes = _remove_duplicates(indexes) + + return indexes + +def _remove_duplicates(input_list: list): + """ + Function to remove duplicates from a list while preserving the order + + Parameters + ---------- + input_list : list + List of elements + + Returns + ------- + unique_list: list + List of unique elements + + """ + + + unique_list = [] + seen_elements = set() + + for element in input_list: + if element not in seen_elements: + unique_list.append(element) + seen_elements.add(element) + + return unique_list + +def _readjust_colors(colors: Union[list, np.ndarray]): + """ + Function to readjust the colors to the range 0-255 + + Parameters + ---------- + colors : list or numpy array + List of colors + + Returns + ------- + colors: Numpy array + List of colors normalized + + """ + + if isinstance(colors, list): + + # If all the values in the list are between 0 and 1, then the values are multiplied by 255 + if max(max(colors)) <= 1: + colors = [color * 255 
for color in colors] + + elif isinstance(colors, np.ndarray): + nrows, ncols = colors.shape + + # If all the values in the array are between 0 and 1, then the values are multiplied by 255 + if np.max(colors) <= 1: + colors = colors * 255 + + return colors + +def _create_random_colors(n: int): + """ + Function to create a list of n random colors + + Parameters + ---------- + n : int + Number of colors + + Returns + ------- + colors: list + List of random colors + + """ + + # Create a numpy array with n random colors in the range 0-255 + colors = np.random.randint(0, 255, size=(n, 3)) + + return colors + +def _correct_names(regnames: list, + prefix: str = None, + sufix: str = None, + lower: bool = False, + remove: list = None, + replace: list = None): + + """ + Correcting region names + @params: + regnames - Required : List of region names: + prefix - Optional : Add prefix to the region names: + sufix - Optional : Add sufix to the region names: + lower - Optional : Lower the region names. Default is False: + remove - Optional : Remove the substring item from the region names: + replace - Optional : Replace the substring item from the region names: + """ + + # Add prefix to the region names + if prefix is not None: + # If temp_name do not starts with ctx- then add it + regnames = [ + name if name.startswith(prefix) else prefix + "{}".format(name) + for name in regnames + ] + + # Add sufix to the region names + if sufix is not None: + # If temp_name do not ends with - then add it + regnames = [ + name if name.endswith(sufix) else "{}".format(name) + sufix + for name in regnames + ] + + # Lower the region names + if lower: + regnames = [name.lower() for name in regnames] + + # Remove the substring item from the region names + if remove is not None: + + for item in remove: + + # Remove the substring item from the region names + regnames = [name.replace(item, "") for name in regnames] + + # Replace the substring item from the region names + if replace is not None: + + for item in replace: + + # Replace the substring item from the region names + regnames = [name.replace(item[0], item[1]) for name in regnames] + + return regnames + + +def _my_ismember(a, b): + """ + Function to check if elements of a are in b + + Parameters + ---------- + a : list + List of elements to check + b : list + List of elements to check against + + Returns + ------- + values: list + List of unique elements in a + idx: list + List of indices of elements in a that are in b + + """ + + values, indices = np.unique(a, return_inverse=True) + is_in_list = np.isin(a, b) + idx = indices[is_in_list].astype(int) + + return values, idx + +def _generate_container_command(bash_args, technology:str = "local", image_path:str = None): + """ + This function generates the command to run a bash command inside a container + + Parameters + ---------- + bash_args : list + List of arguments for the bash command + + technology : str + Container technology ("docker" or "singularity"). Default is "local" + + image_path : str + Path to the container image. 
Default is None + + Returns + ------- + container_cmd: list + List with the command to run the bash command locally or inside the container + + """ + + # Checks if the variable "a_list" is a list + if isinstance(bash_args, str): + bash_args = shlex.split(bash_args) + + + container_cmd = [] + # Creating the container command + if technology == "singularity": # Using Singularity technology + container_cmd.append('singularity') # singularity command + container_cmd.append('run') + + # Checking if the arguments are files or directories + bind_mounts = [] + + for arg in bash_args: # Checking if the arguments are files or directories + abs_arg_path = os.path.dirname(arg) + if os.path.exists(abs_arg_path): + bind_mounts.append(abs_arg_path) # Adding the argument to the bind mounts + + if bind_mounts: # Adding the bind mounts to the container command + for mount_path in bind_mounts: + container_cmd.extend(['--bind', f'{mount_path}:{mount_path}']) + + # Adding the container image path and the bash command arguments + if image_path is not None: + if not os.path.exists(image_path): + raise ValueError(f"The container image {image_path} does not exist.") + else: + raise ValueError("The image path is required for Singularity containerization.") + + container_cmd.append(image_path) + container_cmd.extend(bash_args) + + # Using Docker technology + elif technology == "docker": + container_cmd.append('docker') # docker command + container_cmd.append('run') + + for arg in bash_args: # Checking if the arguments are files or directories + abs_arg_path = os.path.dirname(arg) + if os.path.exists(abs_arg_path): + bind_mounts.append(abs_arg_path) # Adding the argument to the bind mounts + + if bind_mounts: # Adding the bind mounts to the container command + for mount_path in bind_mounts: + container_cmd.extend(['-v', f'{mount_path}:{mount_path}']) + + # Adding the container image path and the bash command arguments + if image_path is not None: + if not os.path.exists(image_path): + raise ValueError(f"The container image {image_path} does not exist.") + else: + raise ValueError("The image path is required for Docker containerization.") + + container_cmd.append(image_path) + container_cmd.extend(bash_args) + + else: # No containerization + container_cmd = bash_args + - return tuple(map(ord, hexcode[1:].decode("hex"))) + return container_cmd \ No newline at end of file diff --git a/clabtoolkit/parcellationtools.py b/clabtoolkit/parcellationtools.py index 0c2ba88..542b281 100644 --- a/clabtoolkit/parcellationtools.py +++ b/clabtoolkit/parcellationtools.py @@ -1,89 +1,766 @@ +import os +from datetime import datetime + import numpy as np import pandas as pd +import nibabel as nib +from typing import Union import clabtoolkit.misctools as cltmisc +class Parcellation: + + def __init__(self, + parc_file: Union[str, np.uint] = None, + affine:np.float_ = None): + + self.parc_file = parc_file + + if parc_file is not None: + if isinstance(parc_file, str): + if os.path.exists(parc_file): + + temp_iparc = nib.load(parc_file) + affine = temp_iparc.affine + self.data = temp_iparc.get_fdata() + self.affine = affine + self.dtype = temp_iparc.get_data_dtype() + + elif isinstance(parc_file, np.ndarray): + self.data = parc_file + self.affine = affine + + # Detect minimum and maximum labels + self._parc_range() + + def _keep_by_code(self, + codes2look: Union[list, np.ndarray], + rearrange: bool = False): + """ + Filter the parcellation by a list of codes. It will keep only the structures with codes specified in the list. 
+ @params: + codes2look - Required : List of codes to look for: + rearrange - Required : If True, the parcellation will be rearranged starting from 1. Default = False + """ + + # Convert the codes2look to a numpy array + if isinstance(codes2look, list): + codes2look = cltmisc._build_indexes(codes2look) + codes2look = np.array(codes2look) + + # Create + dims = np.shape(self.data) + out_atlas = np.zeros((dims[0], dims[1], dims[2]), dtype='int16') + + if hasattr(self, "index"): + temp_index = np.array(self.index) + index_new = [] + indexes = [] + + cont = 0 + + for i, v in enumerate(codes2look): + + # Find the elements in the data that are equal to v + result = np.where(self.data == v) + + if len(result[0]) > 0: + cont = cont + 1 + + if hasattr(self, "index"): + # Find the element in self.index that is equal to v + ind = np.where(temp_index == v)[0] + + if len(ind) > 0: + indexes.append(ind[0]) + if rearrange: + index_new.append(cont) + else: + index_new.append(self.index[ind[0]]) + + if rearrange: + out_atlas[result[0], result[1], result[2]] = cont + + else: + out_atlas[result[0], result[1], result[2]] = v + + + # Find the indexes of the elements in a that are also in b + # If index is an attribute of self + if hasattr(self, "index"): + self.index = index_new + + # If name is an attribute of self + if hasattr(self, "name"): + self.name = [self.name[i] for i in indexes] + + # If color is an attribute of self + if hasattr(self, "color"): + self.color = self.color[indexes] + + self.data = out_atlas + + # Detect minimum and maximum labels + self._parc_range() + + + def _remove_by_code(self, + codes2remove: Union[list, np.ndarray], + rearrange: bool = False): + """ + Remove the structures with the codes specified in the list. + @params: + codes2remove - Required : List of codes to remove: + rearrange - Required : If True, the parcellation will be rearranged starting from 1. Default = False + """ + + if isinstance(codes2remove, list): + codes2look = cltmisc._build_indexes(codes2look) + codes2remove = np.array(codes2remove) + + for i, v in enumerate(codes2remove): + # Find the elements in the data that are equal to v + result = np.where(self.data == v) + + if len(result[0]) > 0: + self.data[result[0], result[1], result[2]] = 0 + + st_codes = np.unique(self.data) + st_codes = st_codes[st_codes != 0] + + # If rearrange is True, the parcellation will be rearranged starting from 1 + if rearrange: + self._keep_by_code(codes2look=st_codes, rearrange=True) + else: + self._keep_by_code(codes2look=st_codes, rearrange=False) + + # Detect minimum and maximum labels + self._parc_range() + + def _mask_by_code(self, + codes2mask: Union[list, np.ndarray], + mask_type: str = 'upright' + ): + """ + Mask the structures with the codes specified in the list or array codes2mask. + @params: + codes2mask - Required : List of codes to mask: + mask_type - Optional : Mask type: 'upright' or 'inverted'. 
Default = upright + """ + mask_type.lower() + if mask_type not in ['upright', 'inverted']: + raise ValueError("The mask_type must be 'upright' or 'inverted'") + + if isinstance(codes2mask, list): + codes2look = cltmisc._build_indexes(codes2look) + codes2mask = np.array(codes2mask) + + if mask_type == 'inverted': + self.data[np.isin(self.data, codes2mask)==True] = 0 + + else: + self.data[np.isin(self.data, codes2mask)==False] = 0 + + # Detect minimum and maximum labels + self._parc_range() + + def _group_by_code(self, + codes2group: Union[list, np.ndarray], + new_codes: Union[list, np.ndarray] = None, + new_names: Union[list, str] = None, + new_colors: Union[list, np.ndarray] = None): + """ + Group the structures with the codes specified in the list or array codes2group. + @params: + codes2group - Required : List, numpy array or list of list of codes to group: + new_codes - Optional : New codes for the groups. It can assign new codes + otherwise it will assign the codes from 1 to number of groups: + new_names - Optional : New names for the groups: + new_colors - Optional : New colors for the groups: + + """ + + # Detect thecodes2group is a list of list + if isinstance(codes2group, list): + if isinstance(codes2group[0], list): + n_groups = len(codes2group) + + elif isinstance(codes2group[0], str) or isinstance(codes2group[0], int) or isinstance(codes2group[0], tuple): + codes2group = [codes2group] + n_groups = 1 + + elif isinstance(codes2group, np.ndarray): + codes2group = codes2group.tolist() + n_groups = 1 + + for i, v in enumerate(codes2group): + if isinstance(v, list): + codes2group[i] = cltmisc._build_indexes(v) + + # Convert the new_codes to a numpy array + if new_codes is not None: + if isinstance(new_codes, list): + new_codes = cltmisc._build_indexes(new_codes) + new_codes = np.array(new_codes) + elif isinstance(new_codes, int): + new_codes = np.array([new_codes]) + + elif new_codes is None: + new_codes = np.arange(1, n_groups + 1) + + if len(new_codes) != n_groups: + raise ValueError("The number of new codes must be equal to the number of groups that will be created") + + # Convert the new_names to a list + if new_names is not None: + if isinstance(new_names, str): + new_names = [new_names] + + if len(new_names) != n_groups: + raise ValueError("The number of new names must be equal to the number of groups that will be created") + + # Convert the new_colors to a numpy array + if new_colors is not None: + if isinstance(new_colors, list): + + if isinstance(new_colors[0], str): + new_colors = cltmisc._multi_hex2rgb(new_colors) + + elif isinstance(new_colors[0], np.ndarray): + new_colors = np.array(new_colors) + + else: + raise ValueError("If new_colors is a list, it must be a list of hexadecimal colors or a list of rgb colors") + + elif isinstance(new_colors, np.ndarray): + pass + + else: + raise ValueError("The new_colors must be a list of colors or a numpy array") + + new_colors = cltmisc._readjust_colors(new_colors) + + if new_colors.shape[0] != n_groups: + raise ValueError("The number of new colors must be equal to the number of groups that will be created") + + # Creating the grouped parcellation + out_atlas = np.zeros_like(self.data, dtype='int16') + for i in range(n_groups): + code2look = np.array(codes2group[i]) + + if new_codes is not None: + out_atlas[np.isin(self.data, code2look)==True] = new_codes[i] + else: + out_atlas[np.isin(self.data, code2look)==True] = i + 1 + + self.data = out_atlas + + if new_codes is not None: + self.index = new_codes.tolist() + + if new_names is 
not None: + self.name = new_names + else: + # If new_names is not provided, the names will be created + self.name = ["group_{}".format(i) for i in new_codes] + + if new_colors is not None: + self.color = new_colors + else: + # If new_colors is not provided, the colors will be created + self.color = cltmisc._create_random_colors(n_groups) + + + # Detect minimum and maximum labels + self._parc_range() + + def _rearange_parc(self, offset: int = 0): + """ + Rearrange the parcellation starting from 1 + @params: + offset - Optional : Offset to start the rearrangement. Default = 0 + """ + + st_codes = np.unique(self.data) + st_codes = st_codes[st_codes != 0] + self._keep_by_code(codes2look=st_codes, rearrange=True) + + if offset != 0: + self.index = [x + offset for x in self.index] + + self._parc_range() + + def _add_parcellation(self, + parc2add): + """ + Add a parcellation + @params: + parc2add - Required : Parcellation to add: + """ + if isinstance(parc2add, Parcellation): + parc2add = [parc2add] + + if isinstance(parc2add, list): + if len(parc2add) > 0: + for parc in parc2add: + if isinstance(parc, Parcellation): + ind = np.where(parc.data != 0) + parc.data[ind] = parc.data[ind] + self.maxlab + self.data[ind] = self.data[ind] + parc.data[ind] + + if hasattr(self, "index") and hasattr(self, "name") and hasattr(self, "color"): + parc.index = [x + self.maxlab for x in parc.index] + + self.index = self.index + parc.index + self.name = self.name + parc.name + self.color = np.concatenate((self.color, parc.color), axis=0) + else: + raise ValueError("The list is empty") + + # Detect minimum and maximum labels + self._parc_range() + + def _save_parcellation(self, + out_file: str, + affine: np.float_ = None, + save_lut: bool = False, + save_tsv: bool = False): + """ + Save the parcellation to a file + @params: + out_file - Required : Output file: + affine - Optional : Affine matrix. Default = None + """ + + if affine is None: + affine = self.affine + + out_atlas = nib.Nifti1Image(self.data, affine) + nib.save(out_atlas, out_file) -def _parc_tsv_table(codes, names, colors, tsv_filename): - """ - Function to create a tsv table for parcellation - - Parameters - ---------- - codes : list - List of codes for the parcellation - names : list - List of names for the parcellation - colors : list - List of colors for the parcellation - tsv_filename : str - Name of the tsv file - - Returns - ------- - tsv_df: pandas DataFrame - DataFrame with the tsv table - - """ - - # Table for parcellation - # 1. Converting colors to hexidecimal string - seg_hexcol = [] - nrows, ncols = colors.shape - for i in np.arange(0, nrows): - seg_hexcol.append(cltmisc._rgb2hex(colors[i, 0], colors[i, 1], colors[i, 2])) - - tsv_df = pd.DataFrame( - {"index": np.asarray(codes), "name": names, "color": seg_hexcol} - ) - # print(bids_df) - # Save the tsv table - with open(tsv_filename, "w+") as tsv_file: - tsv_file.write(tsv_df.to_csv(sep="\t", index=False)) - - return tsv_df - - -def _tissue_seg_table(tsv_filename): - """ - Function to create a tsv table for tissue segmentation - - Parameters - ---------- - tsv_filename : str - Name of the tsv file - - Returns - ------- - seg_df: pandas DataFrame - DataFrame with the tsv table - - """ - - # Table for tissue segmentation - # 1. Default values for tissues segmentation table - seg_rgbcol = np.array([[172, 0, 0], [0, 153, 76], [0, 102, 204]]) - seg_codes = np.array([1, 2, 3]) - seg_names = ["cerebro_spinal_fluid", "gray_matter", "white_matter"] - seg_acron = ["CSF", "GM", "WM"] - - # 2. 
Converting colors to hexidecimal string - seg_hexcol = [] - nrows, ncols = seg_rgbcol.shape - for i in np.arange(0, nrows): - seg_hexcol.append( - cltmisc._rgb2hex(seg_rgbcol[i, 0], seg_rgbcol[i, 1], seg_rgbcol[i, 2]) + if save_lut: + if hasattr(self, "index") and hasattr(self, "name") and hasattr(self, "color"): + self._export_colortable(out_file=out_file.replace(".nii.gz", ".lut")) + else: + raise ValueError("The parcellation does not contain a color table") + + if save_tsv: + if hasattr(self, "index") and hasattr(self, "name") and hasattr(self, "color"): + self._export_colortable(out_file=out_file.replace(".nii.gz", ".tsv"), lut_type="tsv") + else: + raise ValueError("The parcellation does not contain a color table") + + def _load_colortable(self, + lut_file: Union[str, dict], + lut_type: str = "lut"): + """ + Add a lookup table to the parcellation + @params: + lut_file - Required : Lookup table file. It can be a string with the path to the + file or a dictionary containing the keys 'index', 'color' and 'name': + lut_type - Optional : Type of the lut file: 'lut' or 'tsv'. Default = 'lut' + """ + + if isinstance(lut_file, str): + if os.path.exists(lut_file): + self.lut_file = lut_file + + if lut_type == "lut": + st_codes, st_names, st_colors = self.read_luttable(in_file=lut_file) + + elif lut_type == "tsv": + st_codes, st_names, st_colors = self.read_tsvtable(in_file=lut_file) + + else: + raise ValueError("The lut_type must be 'lut' or 'tsv'") + + self.index = st_codes + self.name = st_names + self.color = st_colors + + else: + raise ValueError("The lut file does not exist") + + elif isinstance(lut_file, dict): + self.lut_file = None + + if "index" not in lut_file.keys() or "color" not in lut_file.keys() or "name" not in lut_file.keys(): + raise ValueError("The dictionary must contain the keys 'index', 'color' and 'name'") + + colors = lut_file["color"] + if isinstance(colors[0], str): + colors = cltmisc._multi_hex2rgb(colors) + + elif isinstance(colors[0], list): + colors = np.array(colors) + + self.index = lut_file["index"] + self.color = colors + self.name = lut_file["name"] + + def _export_colortable(self, + out_file: str, + lut_type: str = "lut"): + """ + Export the lookup table to a file + @params: + out_file - Required : Lookup table file: + lut_type - Optional : Type of the lut file: 'lut' or 'tsv'. Default = 'lut' + """ + + if not hasattr(self, "index") or not hasattr(self, "name") or not hasattr(self, "color"): + raise ValueError("The parcellation does not contain a color table. 
The index, name and color attributes must be present") + + + if lut_type == "lut": + + now = datetime.now() + date_time = now.strftime("%m/%d/%Y, %H:%M:%S") + headerlines = ['# $Id: {} {} \n'.format(out_file, date_time)] + + if os.path.isfile(self.parc_file): + headerlines.append('# Corresponding parcellation: {} \n'.format(self.parc_file)) + + headerlines.append('{:<4} {:<50} {:>3} {:>3} {:>3} {:>3}'.format("#No.", "Label Name:", "R", "G", "B", "A")) + + self.write_luttable( + self.index, self.name, self.color, out_file, headerlines=headerlines + ) + elif lut_type == "tsv": + self.write_tsvtable( + self.index, self.name, self.color, out_file + ) + else: + raise ValueError("The lut_type must be 'lut' or 'tsv'") + + def _parc_range(self): + """ + Detect the range of labels + + """ + # Detecting the unique elements in the parcellation different from zero + st_codes = np.unique(self.data) + st_codes = st_codes[st_codes != 0] + self.minlab = np.min(st_codes) + self.maxlab = np.max(st_codes) + + @staticmethod + def write_fslcolortable(lut_file_fs: str, + lut_file_fsl: str): + """ + Convert FreeSurfer lut file to FSL lut file + @params: + lut_file_fs - Required : FreeSurfer color lut: + lut_file_fsl - Required : FSL color lut: + """ + + # Reading FreeSurfer color lut + st_codes_lut, st_names_lut, st_colors_lut = Parcellation.read_luttable(lut_file_fs) + + lut_lines = [] + for roi_pos, st_code in enumerate(st_codes_lut): + st_name = st_names_lut[roi_pos] + lut_lines.append( + "{:<4} {:>3.5f} {:>3.5f} {:>3.5f} {:<40} ".format( + st_code, + st_colors_lut[roi_pos, 0] / 255, + st_colors_lut[roi_pos, 1] / 255, + st_colors_lut[roi_pos, 2] / 255, + st_name, + ) + ) + + with open(lut_file_fsl, "w") as colorLUT_f: + colorLUT_f.write("\n".join(lut_lines)) + + @staticmethod + def read_luttable(in_file: str): + """ + Reading freesurfer lut file + @params: + in_file - Required : FreeSurfer color lut: + + Returns + ------- + st_codes: list + List of codes for the parcellation + st_names: list + List of names for the parcellation + st_colors: list + List of colors for the parcellation + + """ + + # Readind a color LUT file + fid = open(in_file) + LUT = fid.readlines() + fid.close() + + # Make dictionary of labels + LUT = [row.split() for row in LUT] + st_names = [] + st_codes = [] + cont = 0 + for row in LUT: + if ( + len(row) > 1 and row[0][0] != "#" and row[0][0] != "\\\\" + ): # Get rid of the comments + st_codes.append(int(row[0])) + st_names.append(row[1]) + if cont == 0: + st_colors = np.array([[int(row[2]), int(row[3]), int(row[4])]]) + else: + ctemp = np.array([[int(row[2]), int(row[3]), int(row[4])]]) + st_colors = np.append(st_colors, ctemp, axis=0) + cont = cont + 1 + + return st_codes, st_names, st_colors + + @staticmethod + def read_tsvtable(in_file: str, + cl_format: str = "rgb"): + """ + Reading tsv table + @params: + in_file - Required : TSV file: + cl_format - Optional : Color format: 'rgb' or 'hex'. 
Default = 'rgb' + + Returns + ------- + st_codes: list + List of codes for the parcellation + st_names: list + List of names for the parcellation + st_colors: list + List of colors for the parcellation + + """ + + # Read the tsv file + if not os.path.exists(in_file): + raise ValueError("The file does not exist") + + tsv_df = pd.read_csv(in_file, sep="\t") + + st_codes = tsv_df["index"].values + st_names = tsv_df["name"].values + temp_colors = tsv_df["color"].values.tolist() + + if cl_format == "rgb": + st_colors = cltmisc._multi_hex2rgb(temp_colors) + elif cl_format == "hex": + st_colors = temp_colors + + return st_codes, st_names, st_colors + + @staticmethod + def write_luttable(codes:list, + names:list, + colors:Union[list, np.ndarray], + out_file:str, + headerlines: Union[list, str] = None, + boolappend: bool = False, + force: bool = True): + + """ + Function to create a lut table for parcellation + + Parameters + ---------- + codes : list + List of codes for the parcellation + names : list + List of names for the parcellation + colors : list + List of colors for the parcellation + lut_filename : str + Name of the lut file + headerlines : list or str + List of strings for the header lines + + Returns + ------- + out_file: file + Lut file with the table + + """ + + # Check if the file already exists and if the force parameter is False + if os.path.exists(out_file) and not force: + raise ValueError("The file already exists") + + out_dir = os.path.dirname(out_file) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + + happend_bool = True # Boolean to append the headerlines + if headerlines is None: + happend_bool = False # Only add this if it is the first time the file is created + now = datetime.now() + date_time = now.strftime("%m/%d/%Y, %H:%M:%S") + headerlines = ['# $Id: {} {} \n'.format(out_file, date_time), + '{:<4} {:<50} {:>3} {:>3} {:>3} {:>3}'.format("#No.", "Label Name:", "R", "G", "B", "A")] + + elif isinstance(headerlines, str): + headerlines = [headerlines] + + elif isinstance(headerlines, list): + pass + + else: + raise ValueError("The headerlines parameter must be a list or a string") + + if boolappend: + if not os.path.exists(out_file): + raise ValueError("The file does not exist") + else: + with open(out_file, "r") as file: + luttable = file.readlines() + + luttable = [l.strip('\n\r') for l in luttable] + luttable = ["\n" if element == "" else element for element in luttable] + + + if happend_bool: + luttable = luttable + headerlines + + else: + luttable = headerlines + + # Table for parcellation + for roi_pos, roi_name in enumerate(names): + luttable.append('{:<4} {:<50} {:>3} {:>3} {:>3} {:>3}'.format(codes[roi_pos], + names[roi_pos], + colors[roi_pos,0], + colors[roi_pos,1], + colors[roi_pos,2], 0)) + luttable.append('\n') + + # Save the lut table + with open(out_file, 'w') as colorLUT_f: + colorLUT_f.write('\n'.join(luttable)) + + return out_file + + @staticmethod + def write_tsvtable(codes:list, + names:list, + colors:Union[list, np.ndarray], + out_file:str, + boolappend: bool = False, + force: bool = False): + """ + Function to create a tsv table for parcellation + + Parameters + ---------- + codes : list + List of codes for the parcellation + names : list + List of names for the parcellation + colors : list + List of colors for the parcellation + tsv_filename : str + Name of the tsv file + + Returns + ------- + tsv_file: file + Tsv file with the table + + """ + + # Check if the file already exists and if the force parameter is False + if 
os.path.exists(out_file) and not force: + raise ValueError("The TSV file already exists") + + out_dir = os.path.dirname(out_file) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + + # Table for parcellation + # 1. Converting colors to hexidecimal string + seg_hexcol = cltmisc._multi_rgb2hex(colors) + + if boolappend: + if not os.path.exists(out_file): + raise ValueError("The file does not exist") + else: + tsv_df = pd.read_csv(out_file, sep="\t") + tsv_df = tsv_df.append( + {"index": np.asarray(codes), "name": names, "color": seg_hexcol} + ) + else: + tsv_df = pd.DataFrame( + {"index": np.asarray(codes), "name": names, "color": seg_hexcol} ) + # Save the tsv table + with open(out_file, "w+") as tsv_file: + tsv_file.write(tsv_df.to_csv(sep="\t", index=False)) + + return tsv_file + + @staticmethod + def tissue_seg_table(tsv_filename): + """ + Function to create a tsv table for tissue segmentation + + Parameters + ---------- + tsv_filename : str + Name of the tsv file + + Returns + ------- + seg_df: pandas DataFrame + DataFrame with the tsv table + + """ + + # Table for tissue segmentation + # 1. Default values for tissues segmentation table + seg_rgbcol = np.array([[172, 0, 0], [0, 153, 76], [0, 102, 204]]) + seg_codes = np.array([1, 2, 3]) + seg_names = ["cerebro_spinal_fluid", "gray_matter", "white_matter"] + seg_acron = ["CSF", "GM", "WM"] + + # 2. Converting colors to hexidecimal string + seg_hexcol = [] + nrows, ncols = seg_rgbcol.shape + for i in np.arange(0, nrows): + seg_hexcol.append( + cltmisc._rgb2hex(seg_rgbcol[i, 0], seg_rgbcol[i, 1], seg_rgbcol[i, 2]) + ) + + seg_df = pd.DataFrame( + { + "index": seg_codes, + "name": seg_names, + "abbreviation": seg_acron, + "color": seg_hexcol, + } + ) + # Save the tsv table + with open(tsv_filename, "w+") as tsv_file: + tsv_file.write(seg_df.to_csv(sep="\t", index=False)) + + return seg_df + + + def _print_properties(self): + """ + Print the properties of the parcellation + """ + + # Get and print attributes and methods + attributes_and_methods = [attr for attr in dir(self) if not callable(getattr(self, attr))] + methods = [method for method in dir(self) if callable(getattr(self, method))] + + print("Attributes:") + for attribute in attributes_and_methods: + if not attribute.startswith("__"): + print(attribute) - seg_df = pd.DataFrame( - { - "index": seg_codes, - "name": seg_names, - "abbreviation": seg_acron, - "color": seg_hexcol, - } - ) - # Save the tsv table - with open(tsv_filename, "w+") as tsv_file: - tsv_file.write(seg_df.to_csv(sep="\t", index=False)) - - return seg_df + print("\nMethods:") + for method in methods: + if not method.startswith("__"): + print(method) diff --git a/setup.cfg b/setup.cfg index 74c42a0..cf5245f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.0 +current_version = 0.2.0 commit = True tag = True diff --git a/setup.py b/setup.py index 60aa20c..03dfd0f 100644 --- a/setup.py +++ b/setup.py @@ -39,6 +39,6 @@ test_suite="tests", tests_require=test_requirements, url="https://github.com/connectomicslab/clabtoolkit", - version="0.1.0", + version="0.2.0", zip_safe=False, )
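
Usage sketch for the new Parcellation class (illustrative only): the snippet below assumes a parcellation volume "sub-01_atlas.nii.gz" and a FreeSurfer-style color table "atlas.lut" exist on disk; both paths, and the "out" folder, are hypothetical. Only methods and arguments introduced in the diff above are called.

    import os
    from clabtoolkit.parcellationtools import Parcellation

    # Create the (hypothetical) output folder up front, since nibabel will not
    # create missing directories when the NIfTI file is written.
    os.makedirs("out", exist_ok=True)

    # Load the parcellation volume and attach its color lookup table.
    parc = Parcellation(parc_file="sub-01_atlas.nii.gz")
    parc._load_colortable(lut_file="atlas.lut", lut_type="lut")

    # Keep a subset of regions and relabel them consecutively from 1.
    # codes2look is expanded by clabtoolkit.misctools._build_indexes, so
    # integers, tuples and range strings such as "20-25" are all accepted
    # (the labels are assumed to be present in the volume).
    parc._keep_by_code(codes2look=[1, (10, 13), "20-25"], rearrange=True)

    # Save the filtered parcellation together with its .lut and .tsv tables;
    # the table file names are derived by replacing the .nii.gz suffix.
    parc._save_parcellation("out/sub-01_atlas_keep.nii.gz",
                            save_lut=True, save_tsv=True)

The same pattern applies to _remove_by_code, _mask_by_code and _group_by_code, which accept the same range notation for their code lists.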
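
The color-table helpers are static methods, so they can also be used without loading a volume. A minimal sketch, writing the default tissue-segmentation table (the same codes, names and colors used by Parcellation.tissue_seg_table) to hypothetical output paths:

    import numpy as np
    from clabtoolkit.parcellationtools import Parcellation

    codes = [1, 2, 3]
    names = ["cerebro_spinal_fluid", "gray_matter", "white_matter"]
    colors = np.array([[172, 0, 0], [0, 153, 76], [0, 102, 204]])

    # FreeSurfer-style LUT with an auto-generated header line.
    Parcellation.write_luttable(codes, names, colors, "out/tissues.lut")

    # TSV table with the colors converted to hexadecimal strings.
    Parcellation.write_tsvtable(codes, names, colors, "out/tissues.tsv",
                                force=True)

read_luttable and read_tsvtable perform the inverse operation and return the codes, names and colors; _load_colortable relies on them internally.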
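
The helpers added to clabtoolkit.misctools can likewise be used on their own. A short sketch of _build_indexes, _multi_rgb2hex and _generate_container_command follows; the command string is hypothetical, and with technology="local" the argument list is returned unchanged (for "docker" or "singularity" an existing image_path is required and the directories of existing file arguments are bound into the container):

    from clabtoolkit.misctools import (_build_indexes, _multi_rgb2hex,
                                       _generate_container_command)

    # Expand a mixed range specification into a flat, duplicate-free index list.
    idx = _build_indexes([1, (4, 6), "10-12", "20:2:24"])
    # -> [1, 4, 5, 6, 10, 11, 12, 20, 22, 24]

    # Convert RGB triplets to hexadecimal color codes.
    hexcodes = _multi_rgb2hex([[172, 0, 0], [0, 153, 76], [0, 102, 204]])
    # -> ['#ac0000', '#00994c', '#0066cc']

    # Split a shell command into an argument list; no container wrapping is
    # applied when technology="local".
    cmd = _generate_container_command("mri_info sub-01_T1w.nii.gz",
                                      technology="local")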