        # --- Find the roots of the {ext+conn} graphs.
        # Roots are the nodes of degree 1 that are to be linked to the soma
        soma.graph_roots = ge_.FindGraphsRootWithNodes(soma)

        # Add a "soma" node and link it to the root nodes
        soma_node = f"S-{int(soma.centroid[0])}-{int(soma.centroid[1])}-{int(soma.centroid[2])}"
        soma.skl_graph.add_node(soma_node, soma=True, soma_nfo=soma)
        for node in soma.graph_roots.values():
            soma.skl_graph.add_edge(node, soma_node, root=True)
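        # The soma node can be retrieved later through its "soma" attribute, e.g.
        # (illustrative only): [n for n, d in soma.skl_graph.nodes(data=True) if d.get("soma")]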
        if save_images is not None:
            nx_.draw_networkx(soma.skl_graph)
            # Save the figure drawn by draw_networkx (savefig, not imsave, which expects an image array)
            pl_.savefig(f"{save_images}\\graph_{name_file}.png")
            pl_.close()
    print(": Done")
    elapsed_time = tm_.gmtime(tm_.time() - start_time)
    print(f"\nElapsed Time={tm_.strftime('%Hh %Mm %Ss', elapsed_time)}")

    # --- Extract features
    print('\n--- Feature Extraction\n')

    # Parameters: histogram bin borders for extension lengths and curvatures
    if hist_bins_borders_length is None:
        number_of_bins_length = int(number_of_bins_length)
        bins_length = np_.linspace(
            hist_min_length,
            hist_min_length + hist_step_length * number_of_bins_length,
            num=number_of_bins_length,
        )
        bins_length[-1] = np_.inf
    else:
        # Float dtype so that the last border can be set to infinity
        bins_length = np_.array(hist_bins_borders_length, dtype=np_.float64)
        bins_length[-1] = np_.inf

    if hist_bins_borders_curvature is None:
        number_of_bins_curvature = int(number_of_bins_curvature)
        bins_curvature = np_.linspace(
            hist_min_curvature,
            hist_min_curvature + hist_step_curvature * number_of_bins_curvature,
            num=number_of_bins_curvature,
        )
        bins_curvature[-1] = np_.inf
    else:
        # Float dtype so that the last border can be set to infinity
        bins_curvature = np_.array(hist_bins_borders_curvature, dtype=np_.float64)
        bins_curvature[-1] = np_.inf
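    # NOTE: setting the last border to np_.inf makes the final histogram bin open-ended, so values
    # beyond the configured range are still counted. Illustrative example (not part of the pipeline):
    #     np_.histogram([0.5, 3.0, 250.0], bins=[0.0, 1.0, 2.0, np_.inf])  # -> counts (1, 0, 2)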
    # Pandas dataframe gathering all the measured features
    features_df = ge_.ExtractFeaturesInDF(
        name_file, somas, size_voxel_in_micron, bins_length, bins_curvature, ext_scales
    )
    # Save the dataframe to .csv
    features_df.to_csv(f"{name_dir}\\{name_file}.csv")

    elapsed_time = tm_.gmtime(tm_.time() - start_time)
    print(f"\nElapsed Time={tm_.strftime('%Hh %Mm %Ss', elapsed_time)}")
    print(f"DONE: {tm_.strftime('%a, %b %d %Y @ %H:%M:%S')}\n")

    return features_df

if __name__ == '__main__':
    # --- Extract cell graphs and features from microscope images using the NutriMorph function.
    #
    # Differentiate between a path to a single TIFF file and a path to a directory
    if pathlib.Path(data_path).is_file():
        # Run the NutriMorph algorithm on the file given as parameter
        features_df = NutriMorph(data_path)
    elif pathlib.Path(data_path).is_dir():
        # Keep the parent directory of the data directory
        name_dir = os_.path.dirname(data_path)
        # Initialize the future concatenated features
        concatenated_features_df = pd_.DataFrame()
        # Find all the TIFF files in the directory and its subdirectories
        for path in pathlib.Path(data_path).glob("**/*.tif"):
            if path.is_file():
                name_file = os_.path.basename(path)
                try:
                    # Run the NutriMorph algorithm
                    features_df = NutriMorph(path)
                    # Concatenate all the dataframes (pd_.concat replaces the deprecated DataFrame.append)
                    concatenated_features_df = pd_.concat([concatenated_features_df, features_df])
                except Exception:
                    ## TODO /!\ Still errors in the graph: some extensions are tangent to each other.
                    #  Verify dilation and erosion!
                    ## TODO + Error in best fitting ellipsoid: JTJ is singular.
                    print(f"WARNING: Error while running NutriMorph on {name_file}")
        # Save to .csv in the parent directory
        concatenated_features_df.to_csv(f"{data_path}_features.csv")
        # --- TODO: Clustering with this dataframe and the module cell_clustering.py
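        # A possible starting point for the clustering step (hypothetical; to be adapted to the
        # actual interface of cell_clustering.py):
        #     all_features = pd_.read_csv(f"{data_path}_features.csv", index_col=0)
        #     # ... pass all_features to the clustering routine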