Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (127)
Showing with 4031 additions and 657 deletions
.idea/
.mypy_cache/
__pycache__/
.*/
_*/
__dll__/
__material__/
__runtime__/
__version__/
__when__.txt
*.so
*.prof
data/
documentation/
result/
mprofile_*.dat
brick/processing/frangi_c/obj/
brick/processing/frangi_c/src/original-formatting/
brick/processing/frangi_c/vesselness.so
brick/vesselness.so
__when__.txt
@@ -29,37 +29,43 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import sys as sstm
from pathlib import Path as path_t

from brick.feature.analysis import Main as Analysis
# from brick.io.storage.parameters import NutrimorphArguments

_CSV_FEATURE_FILE = "/home/eric/Code/project/mno/nutrimorph/2-morgane/_result-SKIP/complete-20W/hypothalamus-priority/features.csv"


def Main():
    """"""
    if sstm.argv.__len__() < 2:
        print("Missing parameter file argument")
        sstm.exit(0)
    argument = path_t(sstm.argv[1])
    if not (argument.is_file() and (argument.suffix.lower() == ".ini")):
        print("Wrong parameter file path or type, or parameter file unreadable")
        sstm.exit(0)
    # config = NutrimorphArguments(argument)
    # (
    #     *_,
    #     input_cfg,
    #     output_cfg,
    #     __,
    #     ___,
    # ) = config
    # base_folder = sorted((output_cfg["save_images"] / input_cfg["data_path"].stem).glob("2023*"))[-1]
    # output_cfg["save_images"] = base_folder / "image"
    # output_cfg["save_csv"] = base_folder / "features"
    # output_cfg["save_features_analysis"] = base_folder / "analysis"
    # path_1 = output_cfg["save_csv"]
    # path_2 = output_cfg["save_features_analysis"]
    # print(base_folder, path_1, path_2, sep="\n")
    Analysis(_CSV_FEATURE_FILE, path_t(_CSV_FEATURE_FILE).parent / "analysis")  # str(path_1 / _CSV_FEATURE_FILE), path_2)


if __name__ == "__main__":
    #
    Main()


import numpy as np_

from brick.general.type import array_t

shifts_of_26_neighbors_c = tuple(
    (i, j, k)
    for i in (-1, 0, 1)
    for j in (-1, 0, 1)
    for k in (-1, 0, 1)
    if i != 0 or j != 0 or k != 0  # every direct neighbor except the voxel itself
)
# Must be positive and strictly higher than the max number of neighbors
invalid_n_neighbors_c = shifts_of_26_neighbors_c.__len__() + 1


def PartLMap(map_: array_t) -> array_t:
    #
    """
    The part mask is labeled as follows: background = invalid_n_neighbors_c;
    pixels of the skeleton = number of neighboring pixels that belong to the skeleton
    (as expected, isolated pixels receive 0).
    """
    #
    result = np_.array(map_, dtype=np_.int8)
    result[result > 0] = 1  # binary map
    padded_sm = np_.pad(map_ > 0, 1, "constant")  # pad for voxels at the border --> map of booleans / binary mask
    for shifts in shifts_of_26_neighbors_c:
        result += np_.roll(padded_sm, shifts, axis=(0, 1, 2))[1:-1, 1:-1, 1:-1]
    result[map_ == 0] = invalid_n_neighbors_c + 1
    return result - 1
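
# Minimal sketch of what PartLMap computes, on a hypothetical toy volume (not part
# of the original code): a 3-voxel line along the first axis of a 3x3x3 array. End
# voxels have one skeleton neighbor, the middle voxel has two, and background
# voxels receive invalid_n_neighbors_c (i.e. 27).
#
#     toy = np_.zeros((3, 3, 3), dtype=np_.int8)
#     toy[:, 1, 1] = 1
#     labels = PartLMap(toy)
#     print(labels[:, 1, 1])  # [1 2 1]
#     print(labels[0, 0, 0])  # 27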
import inspect as nspt
import pathlib
import sys as sstm
from types import FunctionType as function_t
from types import MethodType as method_t
import __main__ as main_package
from memory_profiler import profile as Profiled
def MemoryProfileCallables(module: str) -> None:
"""
module: typically, __name__
"""
module_name = module
module = sstm.modules[module]
functions = (
(_nme, _elm)
for _nme, _elm in nspt.getmembers(module)
if (isinstance(_elm, (function_t, method_t)) and _elm.__module__ == module_name)
)
memory_profile_report = open(
pathlib.Path(main_package.__file__).parent / "memory_profiler.txt", "w+"
)
profile = lambda _fct: Profiled(_fct, stream=memory_profile_report)
for name, function in functions:
setattr(module, name, profile(function))
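
# Assumed usage (not in the original code): calling this at the end of a module wraps
# every function defined in that module with memory_profiler's profile decorator,
# writing the report to memory_profiler.txt next to the main script.
#
#     MemoryProfileCallables(__name__)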
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2019), Morgane Nadal (2020)
#
# eric.debreuve@cnrs.fr
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import brick.processing.dijkstra_1_to_n as dk_
from brick.component.extension import extension_t
from brick.component.glial_cmp import glial_cmp_t
from brick.component.soma import soma_t
from brick.general.type import array_t, site_h, site_path_h
import itertools as it_
from typing import Callable, Sequence, Tuple
import numpy as np_
import matplotlib.pyplot as pl_
import skimage
def CandidateConnections(
somas: Sequence[soma_t],
influence_map: array_t,
dist_to_closest: array_t,
extensions: Sequence[extension_t],
max_straight_sq_dist: float = np_.inf,
) -> list:
#
candidate_conn_nfo = [] # conn=connection
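# Each candidate is a tuple (end-point index, soma, extension, end point);
# the list is sorted below by dist_to_closest at the end point.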
extensions = filter(lambda ext: ext.is_unconnected, extensions)
for soma, extension in it_.product(somas, extensions):
new_candidates = extension.EndPointsForSoma(soma.uid, influence_map)
candidate_conn_nfo.extend(
(ep_idx, soma, extension, end_point)
for ep_idx, end_point in enumerate(new_candidates)
if dist_to_closest[end_point] <= max_straight_sq_dist
)
candidate_conn_nfo.sort(key=lambda elm: dist_to_closest[elm[3]])
return candidate_conn_nfo
def ShortestPathFromToN(
point: site_h,
costs: array_t,
candidate_points_fct: Callable,
max_straight_sq_dist: float = np_.inf,
) -> Tuple[site_path_h, float]:
#
candidate_points, candidate_indexing = candidate_points_fct(
point, max_straight_sq_dist
)
if candidate_points is None:
return (), np_.inf
costs[point] = 0.0
costs[candidate_indexing] = 0.0
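# Temporarily zero the cost at the start point and at the candidate end points;
# these entries are reset to infinity below, before returning.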
# pl_.imshow(costs[10,:,:])
# pl_.show(block=True)
path, length = dk_.DijkstraShortestPath(costs, point, candidate_points)
# print(skimage.graph.route_through_array(image, point, candidate_points))
# We could use here skimage.graph.route_through_array(array, start, end, fully_connected=True, geometric=True)
# -> (path: list, cost: float)
costs[point] = np_.inf
costs[candidate_indexing] = np_.inf
return path, length
def ValidateConnection(
glial_cmp: glial_cmp_t, extension: glial_cmp_t, end_point: tuple, ep_idx: int, dijkstra_path: site_path_h, costs: array_t
) -> None:
#
connection_path = tuple(zip(*dijkstra_path[1:-1]))
if connection_path.__len__() == 0:
connection_path = None
glial_cmp.connection_path[extension.uid] = connection_path
glial_cmp.extensions.append(extension)
extension.BackReferenceSoma(glial_cmp)
# Add the site of the connection between the extension and the soma, for each soma and for each extension
if type(glial_cmp) is soma_t: # restrict the check to the soma <-> ext step
glial_cmp.ext_roots.append(end_point)
# TODO: delete one of this info (ep_idx or end_point) according to the code following and its needs
# TODO: Ideally, these paths should be dilated + put this outside
# but in ext-ext connections, there must not be dilation around the current ext
# (current ext plays the role of a soma in soma-ext step)
if connection_path is not None:
costs[connection_path] = np_.inf
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2019), Morgane Nadal (2020)
#
# eric.debreuve@cnrs.fr
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import itertools
import sys as sy_
from pathlib import Path as path_t
from typing import List, Sequence
import matplotlib.pyplot as pl_
import numpy as nmpy
import pandas as pd_
import scipy as si_
import seaborn as sb_
from logger_36 import LOGGER
from matplotlib.lines import Line2D
from sklearn import tree
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def PCAOnDF(
df: pd_.DataFrame,
target: str,
targets: Sequence[str],
colors: List[str],
save_name: str = None,
title: str = "",
plot_duration: bool = True,
save_in: str = None,
three_D: bool = False,
):
"""
Perform 2D or 3D PCA on the CHO-DIO dataframe.
Save explained variance ratio as csv, plot and save the PCAs.
"""
# Separating the features from their conditions and durations
all_target = pd_.DataFrame(df.loc[:, [target]].values, columns=[target])
df_all = df.drop([target, "duration"], axis=1)
# Standardize the data
scaler = StandardScaler()
scaler.fit(df_all)
stand_df = scaler.transform(df_all)
# Create the PCA and fit the data
pca = PCA(n_components=2)
principal_components = pca.fit_transform(stand_df)
# Printing and saving the explained variance ratio
LOGGER.info(
f"PCA explained variance ratio ({save_name}): {pca.explained_variance_ratio_}"
)
pca_exp_var_df = pd_.DataFrame(
pca.explained_variance_ratio_, columns=["pca explained variance ratio"]
)
pca_exp_var_df = pca_exp_var_df.rename(
index={0: "principal component 1", 1: "principal component 2"}
)
pca_exp_var_df.to_csv(
str(path_t(save_in) / f"pca_explained_variance_ratio_{save_name}.csv")
)
# Creating the principal component df
principal_df = pd_.DataFrame(
data=principal_components,
columns=["principal component 1", "principal component 2"],
)
# Give the final df containing the principal component and their condition
final_df = pd_.concat([principal_df, all_target[target]], axis=1)
# Plot 2D
fig = pl_.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel(
f"Principal Component 1 - ratio = {round(pca.explained_variance_ratio_[0],2)}",
fontsize=15,
)
ax.set_ylabel(
f"Principal Component 2 - ratio = {round(pca.explained_variance_ratio_[1],2)}",
fontsize=15,
)
ax.set_title(f"2 component PCA{title}", fontsize=20)
for tgt, color in zip(targets, colors):
idx = final_df[target] == tgt
ax.scatter(
final_df.loc[idx, "principal component 1"],
final_df.loc[idx, "principal component 2"],
c=color,
s=30,
)
ax.legend(targets)
ax.grid()
if save_name is not None:
pl_.savefig(str(path_t(save_in) / f"PCA_{save_name}.png"))
new_tgts = None
if plot_duration:
# Make separated plots for each duration of the experiments
fig = pl_.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel(
f"Principal Component 1 - ratio = {round(pca.explained_variance_ratio_[0],2)}",
fontsize=15,
)
ax.set_ylabel(
f"Principal Component 2 - ratio = {round(pca.explained_variance_ratio_[1],2)}",
fontsize=15,
)
ax.set_title(f"2 component PCA{title}", fontsize=20)
# new_tgts = [
# ["CHO", "1H"],
# ["CHO", "3H"],
# ["CHO", "6H"],
# ["DIO", "1H"],
# ["DIO", "3H"],
# ["DIO", "6H"],
# ]
final_df = pd_.concat([df[["condition", "duration"]], principal_df], axis=1)
new_tgts = _Experiments(df)
LOGGER.info(f"New Targets: {new_tgts}")
for tgt, color in zip(
new_tgts,
["lavender", "royalblue", "navy", "lightcoral", "firebrick", "red"],
):
new_df = final_df.loc[
(final_df["condition"] == tgt[0]) & (final_df["duration"] == tgt[1])
]
new_df = new_df.drop(["condition", "duration"], axis=1)
ax.scatter(
new_df["principal component 1"],
new_df["principal component 2"],
c=color,
s=30,
)
ax.legend(new_tgts)
ax.grid()
if save_name is not None:
pl_.savefig(str(path_t(save_in) / f"PCA_duration_{save_name}.png"))
if three_D:
# Create the 3D PCA and fit the data
pca3d = PCA(n_components=3)
principal_components3d = pca3d.fit_transform(stand_df)
# Print and Save the explained variance ratio
LOGGER.info(
f"3D PCA explained variance ratio ({save_name}): {pca3d.explained_variance_ratio_}"
)
pca3d_exp_var_df = pd_.DataFrame(
pca3d.explained_variance_ratio_,
columns=["3d pca explained variance ration"],
)
pca3d_exp_var_df = pca3d_exp_var_df.rename(
index={
0: "principal component 1",
1: "principal component 2",
2: "principal component 3",
}
)
pca3d_exp_var_df.to_csv(
str(
path_t(save_in)
/ f"three_d_pca_explained_variance_ratio_{save_name}.csv"
)
)
# Creating the principal component df
principal3d_df = pd_.DataFrame(
data=principal_components3d,
columns=[
"principal component 1",
"principal component 2",
"principal component 3",
],
)
# Give the final df containing the principal component and their condition
final3d_df = pd_.concat([principal3d_df, all_target[target]], axis=1)
# Plot
fig = pl_.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
ax.set_xlabel(
f"Principal Component 1 - ratio = {round(pca3d.explained_variance_ratio_[0],2)}",
fontsize=15,
)
ax.set_ylabel(
f"Principal Component 2 - ratio = {round(pca3d.explained_variance_ratio_[1],2)}",
fontsize=15,
)
ax.set_zlabel(
f"Principal Component 3 - ratio = {round(pca3d.explained_variance_ratio_[2],2)}",
fontsize=15,
)
ax.set_title(f"3 component PCA{title}", fontsize=20)
for tgt, color in zip(targets, colors):
idx = final3d_df[target] == tgt
ax.scatter(
final3d_df.loc[idx, "principal component 1"],
final3d_df.loc[idx, "principal component 2"],
final3d_df.loc[idx, "principal component 3"],
c=color,
s=30,
)
ax.legend(targets)
ax.grid()
if save_name is not None:
pl_.savefig(str(path_t(save_in) / f"three_d_PCA_{save_name}.png"))
if plot_duration:
fig = pl_.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
ax.set_xlabel(
f"Principal Component 1 - ratio = {round(pca3d.explained_variance_ratio_[0], 2)}",
fontsize=15,
)
ax.set_ylabel(
f"Principal Component 2 - ratio = {round(pca3d.explained_variance_ratio_[1],2)}",
fontsize=15,
)
ax.set_zlabel(
f"Principal Component 3 - ratio = {round(pca3d.explained_variance_ratio_[2],2)}",
fontsize=15,
)
ax.set_title(f"3 component PCA{title}", fontsize=20)
final3d_df = pd_.concat(
[df[["condition", "duration"]], principal3d_df], axis=1
)
for tgt, color in zip(
new_tgts,
["lavender", "royalblue", "navy", "lightcoral", "firebrick", "red"],
):
new3d_df = final3d_df.loc[
(final_df["condition"] == tgt[0])
& (final3d_df["duration"] == tgt[1])
]
new3d_df = new3d_df.drop(["condition", "duration"], axis=1)
ax.scatter(
new3d_df["principal component 1"],
new3d_df["principal component 2"],
new3d_df["principal component 3"],
c=color,
s=30,
)
ax.legend(new_tgts)
ax.grid()
if save_name is not None:
pl_.savefig(
str(path_t(save_in) / f"three_d_PCA_duration_{save_name}.png")
)
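
# Minimal usage sketch on hypothetical data (not part of the original code): the
# dataframe must contain the target column (here "condition") and a "duration"
# column in addition to the scalar features.
#
#     toy = pd_.DataFrame({
#         "condition": ["CHO", "CHO", "DIO", "DIO"],
#         "duration": ["1H", "6H", "1H", "6H"],
#         "feat_a": [1.0, 2.0, 3.0, 4.0],
#         "feat_b": [0.5, 0.4, 0.9, 1.1],
#     })
#     PCAOnDF(toy, target="condition", targets=("CHO", "DIO"), colors=["b", "r"],
#             save_name="toy", save_in="/tmp", plot_duration=False)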
def KmeansOnDF(
df: pd_.DataFrame,
nb_clusters: tuple,
target: str,
plot_bar: bool = True,
rep_on_image: bool = False,
labeled_somas=None,
elbow: bool = False,
intracluster_var: bool = True,
save_name: str = None,
save_in: str = None,
title: str = "",
duration: bool = False,
features_distribution: bool = True,
) -> KMeans:
"""
Perform k-means on the pandas dataframe. Can find the best number of clusters with the elbow method,
compute the intracluster variance, and represent the result on the initial images.
Plot barplots with percentage of each label for each condition.
"""
# Separating the features from their conditions and durations
all_target = pd_.DataFrame(df.loc[:, [target]].values, columns=[target])
df2 = df.drop([target, "duration"], axis=1)
df_scalar = df.drop(["duration", "condition"], axis=1)
# Data standardization
scaler = StandardScaler()
stand_df = scaler.fit_transform(df2)
kmeans = None
# Best number of clusters using Elbow method
if elbow:
wcss = [] # within-cluster sum of squares (WCSS)
for i in range(1, 24):
kmeans = KMeans(n_clusters=i, n_init=10, random_state=0)
kmeans.fit(stand_df)
wcss.append(kmeans.inertia_)
pl_.figure()
pl_.plot(range(1, 24), wcss)
pl_.plot(range(1, 24), wcss, "bo")
pl_.title("Elbow Method")
pl_.xlabel("Number of clusters")
pl_.ylabel("WCSS")
pl_.show(block=True)
pl_.close()
# Kmeans with x clusters
for nb_cluster in nb_clusters:
kmeans = KMeans(n_clusters=nb_cluster, n_init=10, random_state=0)
kmeans.fit_predict(stand_df)
label_df = pd_.DataFrame(data=kmeans.labels_, columns=["label"])
lab_cond_df = pd_.concat([label_df, all_target[target]], axis=1)
# Intracluster variance
if intracluster_var:
var_df = pd_.DataFrame(stand_df)
var = IntraClusterVariance(var_df, kmeans, nb_cluster, save_name)
var_df = pd_.DataFrame(var, columns=["intracluster variance"])
var_df = var_df.rename(
{cluster: f"label {cluster}" for cluster in range(nb_cluster)}
)
if save_in:
var_df.to_csv(
str(
path_t(save_in)
/ f"intracluster_variance_kmeans_{save_name}_k={nb_cluster}.csv"
)
)
# TODO Intersection of ellipsoids of the covariance matrices
# - ellipsoid equation: (x-mean)(1/Mcov)(x-mean).T <= 1
# Barplot
if plot_bar:
if duration:
fig = pl_.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
(
lab_cond_df.groupby("condition")["label"]
.value_counts(normalize=True)
.mul(100)
.rename("Percentage (%)")
.reset_index()
.pipe(
(sb_.barplot, "data"),
x="condition",
y="Percentage (%)",
hue="label",
palette=sb_.color_palette("deep", n_colors=nb_cluster),
)
)
ax.set_ylim(0, 100)
ax.set_title(
f"Distribution of the clustering labels according to conditions{title}_k={nb_cluster}",
fontsize=11,
)
ax.grid()
if save_name is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"Hist_Clustering_{save_name}_k={nb_cluster}.png"
)
)
# pl_.show(block=True)
# pl_.close()
else:
## Bhattacharyya similarity
groupbycond = lab_cond_df.groupby("condition")
hist = []
for cond, values in groupbycond:
hist.append(nmpy.histogram(values["label"], bins=2))
bhatt = BhattacharyyaSimilarity(hist[0][0], hist[1][0])
LOGGER.info(
f"Bhattacharyya similarity btw CHO and DIO, all times: {bhatt}"
)
## Plot the barplots
fig = pl_.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
(
groupbycond["label"]
.value_counts(normalize=True)
.mul(100)
.rename("Percentage (%)")
.reset_index()
.pipe(
(sb_.barplot, "data"),
x="condition",
y="Percentage (%)",
hue="label",
palette=sb_.color_palette("deep", n_colors=nb_cluster),
)
)
ax.set_ylim(0, 100)
ax.grid()
ax.set_title(
f"Distribution of the clustering labels according to conditions{title}_k={nb_cluster}",
fontsize=11,
)
if save_name is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"Hist_Clustering_{save_name}_k={nb_cluster}.png"
)
)
# pl_.show(block=True)
# pl_.close()
lab_cond_df2 = pd_.concat((lab_cond_df, df[["duration"]]), axis=1)
bhattacharyya = []
for dur in df["duration"].unique():
dur_df = lab_cond_df2.loc[lab_cond_df2["duration"] == dur]
## Bhattacharyya
groupbycond = dur_df.groupby("condition")
hist = []
for cond, values in groupbycond:
hist.append(nmpy.histogram(values["label"], bins=2))
bhatt = BhattacharyyaSimilarity(hist[0][0], hist[1][0])
LOGGER.info(
f"Bhattacharyya similarity btw CHO and DIO {dur}: {bhatt}"
)
bhattacharyya.append(bhatt)
## Barplot
fig = pl_.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
to_plot = (
dur_df.groupby("condition")["label"]
.value_counts(normalize=True)
.mul(100)
.rename("Percentage (%)")
.reset_index()
)
n_labels = to_plot["label"].unique()
n_labels.sort()
colors = sb_.color_palette("deep", n_colors=nb_cluster)
color_to_use = list(colors[i] for i in n_labels)
(
to_plot.pipe(
(sb_.barplot, "data"),
x="condition",
y="Percentage (%)",
hue="label",
palette=color_to_use,
)
)
ax.set_ylim(0, 100)
ax.grid()
ax.set_title(
f"Distribution of the clustering labels according to conditions{title}_k={nb_cluster} - duration = {dur}",
fontsize=11,
)
if save_name is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"Hist_Clustering_{save_name}_dur_{dur}_k={nb_cluster}.png"
)
)
# pl_.show(block=True)
pl_.close()
if len(bhattacharyya) > 0:
fig = pl_.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
ax.plot(
[f"{duration}" for duration in df["duration"].unique()],
bhattacharyya,  # one distance per unique duration
"ko",
)
ax.plot(
[f"{duration}" for duration in df["duration"].unique()],
bhattacharyya,
"g",
)
ax.set_xlabel("Duration")
ax.set_yscale("log")
ax.set_ylabel("Bhattacharyya distance")
ax.grid()
ax.set_title(
f"Bhattacharyya distance between CHO and DIO{title}_k={nb_cluster}",
fontsize=11,
)
if save_name is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"Bhattacharyya_distance_{save_name}_k={nb_cluster}.png"
)
)
pl_.close()
if save_name is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"Hist_duration_Clustering_{save_name}_k={nb_cluster}.png"
)
)
print(f"Saved in {save_in}")
# pl_.show(block=True)
pl_.close()
if features_distribution:
for column in df_scalar.columns:
cond_col_df = pd_.concat((df_scalar[column], lab_cond_df), axis=1)
conditions = _Conditions(df_scalar)
print("----", conditions)
labels = nmpy.arange(nb_cluster)
fig = pl_.figure(constrained_layout=False)
gs = fig.add_gridspec(ncols=1, nrows=1)
ax1 = fig.add_subplot(gs[0, 0])
# Plot a histogram and kernel density estimate
# print(f"Plot a histogram and kernel density estimate for feature {column}")
for comb, color in zip(
itertools.product(conditions, labels),
sb_.color_palette("deep", n_colors=len(labels) * len(conditions)),
):
to_plot = cond_col_df.loc[(cond_col_df["condition"] == comb[0])]
to_plot = to_plot.loc[(to_plot["label"] == comb[1])]
# Kernel estimate of the histogram
sb_.kdeplot(to_plot[[column]], color=color, ax=ax1)
lines = [
Line2D([0], [0], color=c, linewidth=3, linestyle="-")
for c in sb_.color_palette(
"deep", n_colors=len(labels) * len(conditions)
)
]
lb = list(itertools.product(conditions, labels))
ax1.legend(lines, lb)
ax1.set_title(f"{column} distribution{title}", fontsize=11)
pl_.tight_layout()
if save_in is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"feat_kernel_estimate_{column}_k={nb_cluster}.png"
)
)
pl_.close()
# Boxplot
for comb, color in zip(
itertools.product(conditions, labels),
sb_.color_palette("deep", n_colors=len(labels) * len(conditions)),
):
fig = pl_.figure(constrained_layout=False, figsize=(10, 3))
gs = fig.add_gridspec(ncols=1, nrows=1)
ax1 = fig.add_subplot(gs[0, 0])
to_plot = cond_col_df.loc[(cond_col_df["condition"] == comb[0])]
to_plot = to_plot.loc[(to_plot["label"] == comb[1])]
sb_.boxplot(to_plot[[column]], color=color, ax=ax1)
ax1.set_xlim(min(cond_col_df[column]), max(cond_col_df[column]))
lines = [Line2D([0], [0], color=color, linewidth=3, linestyle="-")]
ax1.legend(lines, [comb])
ax1.set_title(f"{column} boxplot{title}", fontsize=11)
pl_.tight_layout()
if save_in is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"feat_boxplot_{column}_{comb}_k={nb_cluster}.png"
)
)
pl_.close()
# Do the same thing but separating durations
cond_col_df_dur = pd_.concat((cond_col_df, df["duration"]), axis=1)
groupby_dur_ = cond_col_df_dur.groupby("duration")
fig = pl_.figure(constrained_layout=False, figsize=(15, 10))
gs = fig.add_gridspec(ncols=3, nrows=1)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2])
ax = [ax1, ax2, ax3]
# Plot a histogram and kernel density estimate
# print(f"Plot a histogram and kernel density estimate for feature {column}")
x = 0
for dur_, val in groupby_dur_:
for comb, color in zip(
itertools.product(conditions, labels),
sb_.color_palette(
"deep", n_colors=len(labels) * len(conditions)
),
):
to_plot = val.loc[(val["condition"] == comb[0])]
to_plot = to_plot.loc[(to_plot["label"] == comb[1])]
# Kernel estimate of the histogram
sb_.kdeplot(
to_plot[[column]], color=color, ax=ax[x]
)
lines = [
Line2D([0], [0], color=c, linewidth=3, linestyle="-")
for c in sb_.color_palette(
"deep", n_colors=len(labels) * len(conditions)
)
]
lb = list(itertools.product(conditions, labels))
ax[x].legend(lines, lb)
ax[x].set_title(f"{dur_}", fontsize=11)
# ax[x].set_xlim(min(val[column]), max(val[column]))
# ax[x].set_ylim()
ax[x].set_xlabel("Distribution kernel estimate")
ax[x].set_ylabel("Features values")
x += 1
fig.suptitle(f"{column} distribution{title}")
# pl_.tight_layout()
if save_in is not None:
pl_.savefig(
str(
path_t(save_in)
/ f"feat_kernel_estimate_{column}_k={nb_cluster}_dur.png"
)
)
pl_.close()
# Representation on the image
if rep_on_image:
RepresentationOnImages(labeled_somas, kmeans, nb_cluster)
return kmeans
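
# Minimal usage sketch (not in the original code), reusing the hypothetical "toy"
# dataframe sketched after PCAOnDF: one k-means fit is run per entry of nb_clusters;
# with the plotting options disabled, the last fitted model is simply returned.
#
#     kmeans = KmeansOnDF(toy, nb_clusters=(2,), target="condition", plot_bar=False,
#                         intracluster_var=False, features_distribution=False)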
def IntraClusterVariance(
df: pd_.DataFrame, kmeans: KMeans, nb_cluster: int, save_name: str = ""
) -> list:
"""
Return the intracluster variance of each cluster found by k-means.
"""
var = []
# TODO change to .inertia_
for cluster in range(nb_cluster):
soma_cluster = [
indx for indx, value in enumerate(kmeans.labels_) if value == cluster
]
mean_cluster = nmpy.average(
[df.iloc[row, :] for row in soma_cluster], axis=0
) # TODO .cluster_centers_
variance = sum(
[
nmpy.linalg.norm(df.iloc[row, :] - mean_cluster) ** 2
for row in soma_cluster
]
) / (len(soma_cluster) - 1)
var.append(variance)
LOGGER.info(
f"Intracluster variance for {nb_cluster} clusters ({save_name}) : {var}"
)
return var
def RepresentationOnImages(labeled_somas, kmeans, nb_cluster):
"""
Represent the result of k-means on the labeled image. In development. Only available for an intra-image k-means.
"""
clustered_somas = labeled_somas.copy()
clustered_somas = nmpy.amax(clustered_somas, axis=0)
for indx, value in enumerate(kmeans.labels_):
for indx_axe, axe in enumerate(clustered_somas):
for indx_pixel, pixel in enumerate(axe):
if pixel == indx + 1:
clustered_somas[indx_axe][indx_pixel] = value + 1
pl_.imshow(clustered_somas, cmap="tab20")
pl_.title(f"n cluster = {nb_cluster}")
pl_.show(block=True)
pl_.close()
def _Conditions(df) -> tuple[str, ...]:
"""Return the unique experimental conditions found in the dataframe."""
return tuple(set(df["condition"].to_numpy().tolist()))
def _Experiments(df):
"""Return the unique (condition, duration) pairs found in the dataframe."""
return tuple(set(tuple(_elm) for _elm in df[["condition", "duration"]].to_numpy().tolist()))
def FeaturesStatistics(
df: pd_.DataFrame,
save_in: str = None,
title: str = "",
describe: bool = True,
heatmap: bool = True,
drop_feat: bool = True,
distribution: bool = True,
stat_test: bool = True,
decision_tree: bool = True,
):
"""
Return the statistics that help choose the most relevant features to feed, e.g., ML algorithms:
statistical description, correlation heatmap, dropping of highly correlated features or of features with little difference between the two conditions,
plots of the feature distributions and boxplots, and Kolmogorov-Smirnov and Wilcoxon rank-sum two-sample tests.
Can also draw a decision tree.
"""
#
# Overview of the basic stats on each column of df
if describe:
description = df.describe()
if save_in is not None:
description.to_csv(str(path_t(save_in) / "df_stat_description.csv"))
# Data formatting
df_scalar = df.drop(["duration", "condition"], axis=1)
df_cond = df.drop(["duration"], axis=1)
df_groupby_dur = df.groupby("duration")
# Compute the correlation matrix
corr_matrix = df_scalar.corr().abs()
if heatmap:
# Plot heat map with correlation matrix btw features
print("Heat map with correlation matrix btw features")
# Generate a mask for the upper triangle
mask = nmpy.triu(nmpy.ones_like(corr_matrix, dtype=nmpy.bool_))
# Set up the figure
fig, ax = pl_.subplots(figsize=(13, 9))
# Generate a custom diverging colormap
cmap = sb_.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sb_.heatmap(
corr_matrix,
mask=mask,
cmap=cmap,
center=0,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
xticklabels=False,
)
ax.set_title(f"Features correlation heat map{title}", fontsize=20)
if save_in is not None:
pl_.savefig(
str(path_t(save_in) / f"Features correlation heat map{title}.png")
)
pl_.close()
all_conditions = _Conditions(df_cond)
stat_tests_df1 = stat_tests_df2 = None
if stat_test and (all_conditions.__len__() == 2):
# Create dictionaries to store the statistics and p-values
dict_ks = {}
dict_wx = {}
dict_ks_dur = {}
dict_wx_dur = {}
for column in df_scalar.columns:
# For each feature, perform a statistical test to know if the feature distribution or median is different btw CHO and DIO conditions
cond_col_df = pd_.concat((df_scalar[column], df_cond["condition"]), axis=1)
# Separate the conditions
CHO = cond_col_df.loc[cond_col_df["condition"] == all_conditions[0]]
CHO = nmpy.asarray(CHO[column])
DIO = cond_col_df.loc[cond_col_df["condition"] == all_conditions[1]]
DIO = nmpy.asarray(DIO[column])
# Compare distribution between conditions (goodness of fit)
ks = si_.stats.ks_2samp(CHO, DIO)
dict_ks[column] = ks
# Compare median between conditions
wx = si_.stats.ranksums(CHO, DIO)
dict_wx[column] = wx
# Plot the p-values
fig = pl_.figure(constrained_layout=False, figsize=(15, 8))
gs = fig.add_gridspec(ncols=2, nrows=1)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
#
ax1.set_title(
f"Kolmogorov-Smirnov p-value btw conditions - {column}",
fontsize=13,
pad=20,
)
ax1.grid()
ax1.set_xlabel("Duration")
ax1.set_yscale("log")
ax1.set_ylabel("p-value")
#
ax2.set_title(
f"Wilcoxon signed-rank p-value btw conditions - {column}",
fontsize=13,
pad=20,
)
ax2.grid()
ax2.set_xlabel("Duration")
ax2.set_yscale("log")
ax2.set_ylabel("p-value")
# Do the same as above, but separately for each duration
for duration, values in df_groupby_dur:
if duration not in dict_ks_dur:
dict_ks_dur[duration] = {}
dict_wx_dur[duration] = {}
duration_df = values.drop(["duration"], axis=1)
CHO_ = duration_df.loc[duration_df["condition"] == all_conditions[0]]
DIO_ = duration_df.loc[duration_df["condition"] == all_conditions[1]]
CHO_ = CHO_.drop(["condition"], axis=1)
DIO_ = DIO_.drop(["condition"], axis=1)
CHO_ = nmpy.asarray(CHO_[column])
DIO_ = nmpy.asarray(DIO_[column])
# Compare distribution
ks2 = si_.stats.ks_2samp(CHO_, DIO_)
dict_ks_dur[duration][column] = ks2
# Compare median
wx2 = si_.stats.ranksums(CHO_, DIO_)
dict_wx_dur[duration][column] = wx2
ax1.plot(
[f"{duration}" for duration, _ in df_groupby_dur],
[dict_ks_dur[duration][column][1] for duration, _ in df_groupby_dur],
"g",
)
ax1.plot(
[f"{duration}" for duration, _ in df_groupby_dur],
[dict_ks_dur[duration][column][1] for duration, _ in df_groupby_dur],
"ko",
)
ax2.plot(
[f"{duration}" for duration, _ in df_groupby_dur],
[dict_wx_dur[duration][column][1] for duration, _ in df_groupby_dur],
"y",
)
ax2.plot(
[f"{duration}" for duration, _ in df_groupby_dur],
[dict_wx_dur[duration][column][1] for duration, _ in df_groupby_dur],
"ko",
)
pl_.tight_layout()
if save_in:
pl_.savefig(str(path_t(save_in) / f"p_values_duration_{column}.png"))
pl_.close()
# Reformat the data to save it as csv
df_ks = pd_.DataFrame.from_dict(data=dict_ks)
df_ks = df_ks.rename(
index={0: "Kolmogorov-Smirnov statistic", 1: "Kolmogorov-Smirnov p-value"}
)
df_wx = pd_.DataFrame.from_dict(data=dict_wx)
df_wx = df_wx.rename(index={0: "Wilcoxon statistic", 1: "Wilcoxon p-value"})
df_ks_dur = pd_.DataFrame()
df_wx_dur = pd_.DataFrame()
for key in dict_ks_dur.keys():
df_ks_dur = pd_.concat(
(df_ks_dur, pd_.DataFrame.from_dict(data=dict_ks_dur[key]))
)
df_ks_dur = df_ks_dur.rename(
index={
0: f"{key} - Kolmogorov-Smirnov statistic",
1: f"{key} - Kolmogorov-Smirnov p-value",
}
)
df_wx_dur = pd_.concat(
(df_wx_dur, pd_.DataFrame.from_dict(data=dict_wx_dur[key]))
)
df_wx_dur = df_wx_dur.rename(
index={0: f"{key} - Wilcoxon statistic", 1: f"{key} - Wilcoxon p-value"}
)
stat_tests_df1 = pd_.concat((df_ks, df_wx)) # KS and Wx all durations
stat_tests_df2 = pd_.concat(
(df_ks_dur, df_wx_dur)
) # KS and Wx for each duration
if save_in:
stat_tests_df1.to_csv(
str(path_t(save_in) / "stat_test_KS_wilcoxon_all.csv")
)
stat_tests_df2.to_csv(
str(path_t(save_in) / "stat_test_KS_wilcoxon_for_each_duration.csv")
)
if drop_feat:
# Drop highly correlated features
LOGGER.info("Drop highly correlated features")
# Select upper triangle of correlation matrix
upper = corr_matrix.where(
nmpy.triu(nmpy.ones(corr_matrix.shape), k=1).astype(nmpy.bool_)
)
# Find index of feature columns with correlation greater than 0.9
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
# Drop features
drop_HCF_df = df_scalar.drop(df[to_drop], axis=1)
# Drop column with null variance
drop_HCF_df = drop_HCF_df.loc[:, drop_HCF_df.var() != 0.0]
if stat_test and (all_conditions.__len__() == 2):
# drop non-significant features (distribution and median not different btw CHO and DIO)
drop_HCF_df_6H = drop_HCF_df.copy()
# based on all durations;
to_drop = [
column
for column in drop_HCF_df.columns
if (stat_tests_df1.loc["Kolmogorov-Smirnov p-value", column] > 1.0e-2)
and (stat_tests_df1.loc["Wilcoxon p-value", column] > 1.0e-2)
]
drop_HCF_df = drop_HCF_df.drop(drop_HCF_df[to_drop], axis=1)
if "6H - Kolmogorov-Smirnov p-value" in stat_tests_df2.columns:
# only based on 6H duration;
to_drop_6H = [
column
for column in drop_HCF_df_6H.columns
if (
stat_tests_df2.loc["6H - Kolmogorov-Smirnov p-value", column]
> 1.0e-3
)
and (stat_tests_df2.loc["6H - Wilcoxon p-value", column] > 1.0e-3)
]
drop_HCF_df_6H = drop_HCF_df_6H.drop(drop_HCF_df_6H[to_drop_6H], axis=1)
drop_HCF_df_6H = pd_.concat(
(df[["condition", "duration"]], drop_HCF_df_6H), axis=1
)
if save_in:
drop_HCF_df_6H.to_csv(
str(path_t(save_in) / "df_drop_highly_corr_feat_6H.csv")
)
print(
f"Selection of features with distribution and median of CHO vs DIO different in: {save_in}"
)
# Add the condition and duration
drop_HCF_df = pd_.concat((df[["condition", "duration"]], drop_HCF_df), axis=1)
if save_in:
drop_HCF_df.to_csv(str(path_t(save_in) / "df_drop_highly_corr_feat.csv"))
print(f"Selection of low correlated features in: {save_in}")
# Statistics for each feature
if distribution and (all_conditions.__len__() == 2):
for column in df_scalar.columns:
cond_col_df = pd_.concat((df_scalar[column], df_cond["condition"]), axis=1)
fig = pl_.figure(constrained_layout=False)
gs = fig.add_gridspec(ncols=2, nrows=1)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
# Plot a histogram and kernel density estimate
print(f"Plot a histogram and kernel density estimate for feature {column}")
CHO = cond_col_df.loc[cond_col_df["condition"] == all_conditions[0]]
DIO = cond_col_df.loc[cond_col_df["condition"] == all_conditions[1]]
sb_.histplot(CHO[[column]], color="b", ax=ax1)
sb_.histplot(DIO[[column]], color="r", ax=ax1)
# Draw a boxplot
print(f"Plot a boxplot for feature {column}")
sb_.boxplot(
data=df_cond,
x="condition",
y=column,
hue="condition",
palette=["b", "r"],
ax=ax2,
)
ax1.set_title(f"{column} distribution{title}", fontsize=11)
ax2.set_title(f"{column} boxplot{title}", fontsize=11)
pl_.tight_layout()
if save_in is not None:
pl_.savefig(str(path_t(save_in) / f"feat_distrib_{column}.png"))
pl_.close()
# Decision tree
if decision_tree:
# Test btw CHO and DIO
dt_df = df.drop(["condition", "duration"], axis=1)
clf = tree.DecisionTreeClassifier(max_depth=4)
clf = clf.fit(dt_df, df["condition"])
fig = pl_.figure(figsize=(150, 65))
tree.plot_tree(
clf,
feature_names=df.columns.tolist(),
class_names=list(all_conditions),
filled=True,
rounded=True,
fontsize=60,
)
fig.suptitle(f"Decision tree all durations", fontsize=120)
if save_in:
pl_.savefig(
str(path_t(save_in) / f"Decision_tree_all_durations_{title}.png")
)
pl_.close()
# Test btw CHO and DIO depending on duration
for duration, values in df_groupby_dur:
duration_df = values.drop(["duration", "condition"], axis=1)
clf = tree.DecisionTreeClassifier(max_depth=3)
clf = clf.fit(duration_df, values["condition"])
fig = pl_.figure(figsize=(30, 16))
tree.plot_tree(
clf,
feature_names=df.columns.tolist(),
class_names=list(all_conditions),
filled=True,
rounded=True,
fontsize=8,
)
fig.suptitle(f"Decision tree {duration}", fontsize=16)
if save_in:
pl_.savefig(
str(path_t(save_in) / f"Decision_tree_{duration}_{title}.png")
)
pl_.close()
def BhattacharyyaSimilarity(h1, h2):
return -nmpy.log(nmpy.sum(nmpy.sqrt(nmpy.multiply(Normalize(h1), Normalize(h2)))))
def Normalize(h):
return h / nmpy.sum(h)
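
# Sanity-check sketch (not part of the original code): for identical histograms the
# Bhattacharyya coefficient is 1, so the returned value is -ln(1) = 0; it grows as the
# normalized histograms diverge, and is infinite for disjoint supports.
#
#     BhattacharyyaSimilarity(nmpy.array([5, 5]), nmpy.array([5, 5]))    # 0.0
#     BhattacharyyaSimilarity(nmpy.array([10, 0]), nmpy.array([0, 10]))  # inf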
def Main(path, save_in):
""""""
# TODO: clean, reduce and optimize the code (many duplicates)
#
# os.chdir("path")
## If need to concatenate files:
# all_filenames = [i for i in glob.glob('*.{}'.format("csv"))]
# print(all_filenames)
# df = pd_.concat([pd_.read_csv(f, index_col=0) for f in all_filenames])
# df.to_csv(".\combined_features.csv")
## If use labeled somas:
# labeled_somas = nmpy.load("path.npy")
# df = pd_.read_csv(".\combined_features.csv", index_col=0)
## DF cleaning
df0 = pd_.read_csv(
str(path),
# index_col=0,
)
LOGGER.info(f"Read: {path}: {df0.shape}")
# "Unnamed: 0"=column of index labels
df = df0.drop(["Unnamed: 0"], axis=1)
LOGGER.info(f"After dropping Unnamed: {df.shape}")
## Statistical analysis
# For the moment, drop the columns with non-scalar values and the non-useful ones
# - TO BE CHANGED TODO (use distance metrics such as bhattacharyya coef, etc)
df = df.drop(
[
"soma uid",
"spherical_angles_eva",
"spherical_angles_evb",
"hist_lengths",
"hist_lengths_P",
"hist_lengths_S",
"hist_curvature",
"hist_curvature_P",
"hist_curvature_S",
],
axis=1,
)
LOGGER.info(f"After dropping non-scalar: {df.shape}")
df = df.dropna(axis=0, how="any")
LOGGER.info(f"After dropping NaN: {df.shape}")
# -- PCA with all the features
print("\nALL FEATURES\n")
# Between the two conditions, regardless of the duration of the experiment (2 conditions, all durations)
PCAOnDF(
df,
target="condition",
targets=_Conditions(df),
colors=["b", "r"],
save_name="all_features",
save_in=save_in,
three_D=True,
)
# # Between the two conditions, for each duration (2 conditions, 3 durations)
# groupby_duration = df.groupby("duration")
# for duration, values in groupby_duration:
# ## duration: str, values: pd_.DataFrame()
# PCAOnDF(values,
# target="condition",
# targets=["CHO", "DIO"],
# colors=["b", "r"],
# save_name=f"{duration}_features",
# save_in=save_in,
# title=f" - {duration} Sample",
# plot_duration=False,
# three_D=True,
# )
# -- K-means with all the features (2 conditions)
# Test for multiple glial populations
# Between the two conditions, regardless the duration of experiment (2 conditions, all durations)
# kmeans = KmeansOnDF(df,
# target="condition",
# nb_clusters=(2, 3, 4, 5),
# elbow=True,
# save_name="all_features_multiple_pop",
# save_in=save_in,
# features_distribution=False,
# )
# # Between the two conditions, for each duration (2 conditions, 3 durations)
# groupby_duration = df.groupby("duration")
# for duration, values in groupby_duration:
# kmeans = KmeansOnDF(values,
# target="condition",
# nb_clusters=(2, 3, 4, 5),
# elbow=False,
# intracluster_var=True,
# plot_bar=True,
# save_name=f"{duration}_features_multiple_pop",
# title=f" - {duration} Sample",
# duration=True,
# save_in=save_in,
# )
# -- Various plots to analyse the data and find discriminant features by statistical analysis
print("\nFEATURE SELECTION\n")
FeaturesStatistics(
df,
save_in=save_in,
)
## TODO: Enter selected features here
# selected_features = []
# selected_df = df[selected_features]
## TODO Or use the csv with dropped features
try:
selected_df = pd_.read_csv(str(path_t(save_in) / "df_drop_highly_corr_feat.csv"))
LOGGER.info(f"Read: {path_t(save_in) / 'df_drop_highly_corr_feat.csv'}: {selected_df.shape}")
# selected_df = pd_.read_csv(f"{save_in}\\df_drop_highly_corr_feat_6H.csv")
except FileNotFoundError:
raise RuntimeError(
"Run only the part up to and including FeaturesStatistics to generate df_drop_highly_corr_feat.csv, then run the last part."
)
## If this error is raised, only run the part up to FeaturesStatistics included, and then run the last part.
# if other columns need to be dropped:
try:
to_drop = ["Unnamed: 0", "min_curvature"]
selected_df = selected_df.drop(to_drop, axis=1)
LOGGER.info(f"After dropping Unnamed and min_curvature: {selected_df.shape}")
except KeyError:
selected_df = selected_df.drop(["Unnamed: 0"], axis=1)
LOGGER.info(f"After dropping Unnamed: {selected_df.shape}")
# -- PCA with all the features
print("\nSELECTED FEATURES\n")
# Between the two conditions, regardless of the duration of the experiment (2 conditions, all durations)
if selected_df.columns.size == 2:
LOGGER.warning("All features are highly correlated; No selected feature analysis.")
else:
PCAOnDF(
selected_df,
target="condition",
targets=_Conditions(selected_df),
colors=["b", "r"],
save_name="all_selected_features",
save_in=save_in,
three_D=True,
)
# # Between the two conditions, for each duration (2 conditions, 3 durations)
# groupby_duration = selected_df.groupby("duration")
# for duration, values in groupby_duration:
# # duration: str, values: pd_.DataFrame()
# PCAOnDF(values,
# target="condition",
# targets=["CHO", "DIO"],
# colors=["b", "r"],
# save_name=f"{duration}_selected_features",
# save_in=save_in,
# title=f" - {duration} Sample - selected features",
# plot_duration=False,
# three_D=True,
# )
# -- K-means with all the features (2 conditions)
# Between the two conditions, regardless the duration of experiment (2 conditions, all durations)
# kmeans = KmeansOnDF(selected_df,
# target="condition",
# nb_clusters=(2, 3, 4, 5),
# intracluster_var=False,
# save_name="all_selected_features",
# save_in=save_in,
# )
# # Between the two conditions, for each duration (2 conditions, 3 durations)
# groupby_duration = selected_df.groupby("duration")
# for duration, values in groupby_duration:
# kmeans = KmeansOnDF(values,
# target="condition",
# nb_clusters=(2,3,4,5),
# elbow=False,
# intracluster_var=True,
# plot_bar=True,
# save_name=f"{duration}_selected_features",
# save_in=save_in,
# title=f" - {duration} Sample - selected features",
# duration=True,
# )
## TODO: Random forests ?
if __name__ == "__main__":
#
Main(sy_.argv[1], sy_.argv[2])
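
# Assumed command-line usage (module path inferred from the earlier import
# "from brick.feature.analysis import Main as Analysis"):
#     python -m brick.feature.analysis <features.csv> <output-folder>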
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2019), Morgane Nadal (2020)
#
# eric.debreuve@cnrs.fr
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
from typing import Any, Dict, Sequence, Union
import numpy as nmpy
import pandas as pd_
import scipy.stats as st_
from logger_36 import LOGGER
from skl_graph.type.constant import SKL_GRAPH
import brick.task.ellipsoid_fit as bf_
import brick.task.unit
from brick.type.base import array_t
from brick.type.soma import soma_t
def FindGraphsRootWithEdges(
soma: soma_t, ext_nfo: Dict[str, Union[array_t, Any]]
) -> dict:
"""
Find, for a given soma, the root nodes of its extension graphs.
"""
# For a given soma, find the roots of the graphs
root_nodes = {}
# Finds the primary extensions
primary_extension_uids = tuple(extension.uid for extension in soma.extensions)
# print(primary_extension_uids, '\nn = ', len(primary_extension_uids))
# List of the degree 1 nodes of the graph
for node1_id, node2_id, edge_nfo in soma.skl_graph.edges.data(SKL_GRAPH):
if (soma.skl_graph.degree[node1_id] == 1) or (
soma.skl_graph.degree[node2_id] == 1
):
# Find the pixels of the terminal extension
sites = ext_nfo["lmp_soma"][edge_nfo.sites]
ext_uid = nmpy.unique(sites)[-1]
# sites > 0 because ext_nfo['lmp'] does not contain the connections
# Save the root node candidates (one-degree nodes)
if ext_uid in primary_extension_uids:
if soma.skl_graph.degree[node1_id] == 1:
root_node = node1_id
else:
root_node = node2_id
# Get the node coordinates and extend them to the 26 neighboring voxels
# tuple('x-y-z') -> list[(x,y,z)]
root_node_coor = soma.skl_graph.nodes[root_node][
SKL_GRAPH
].position.tolist() # _GetNodesCoordinates((root_node,))[0]
root_sites = set(
(
root_node_coor[0] + i,
root_node_coor[1] + j,
root_node_coor[2] + k,
)
for i in (-1, 0, 1)
for j in (-1, 0, 1)
for k in (-1, 0, 1)
if i != 0 or j != 0 or k != 0
)
# Find the intersection between the extended root node candidate and the soma contour points
intersections = set(soma.contour_points).intersection(root_sites)
# If the extended root node intersects the soma contour points (non-empty intersection):
if len(intersections) > 0:
# Keep the info of the root node. Key = ext uid, Value = root node
root_nodes[ext_uid] = root_node
## By construction, only one root node possible for an ext
# TODO: find out why there are fewer root points than extensions !!
return root_nodes
def FindGraphsRootWithNodes(soma: soma_t) -> dict:
"""
Find the roots of the {extension + connection} graphs to be linked to the soma.
Add a key "root" (bool) in the dict of nodes attributes.
"""
is_end_point = []
node_label = []
coordinates = []
for label, degree in soma.skl_graph.degree:
is_end_point.append(degree == 1)
node_label.append(label)
coordinates.append(
tuple(soma.skl_graph.nodes[label][SKL_GRAPH].position.tolist())
)
# Find the nodes of degree == 1, and the str coordinates of the nodes
# is_end_point = tuple(degree == 1 for _, degree in soma.skl_graph.degree)
# node_label = tuple(xyz for xyz, _ in soma.skl_graph.degree)
root_nodes = {}
# get the coordinates of the nodes (x,y,z)
# coordinates = _GetNodesCoordinates(node_label)
# get a list with elements = (extension_uid, root coordinates), whose length is the number of primary extensions
roots = soma.ext_roots
# for each node in the graph, search among the degree 1 nodes the nodes that are roots (linked to soma)
for node in range(len(coordinates)):
if is_end_point[node]:
# compare the coor with end points
for ext_root in roots:
if ext_root[1] == coordinates[node]:
root_nodes[ext_root[0]] = node_label[node]
if root_nodes.__len__() != roots.__len__():
# raise ValueError("Number of extensions roots not equal to number of graph roots.")
LOGGER.warning(
f"Number of extensions roots: {root_nodes.__len__()} "
f"not equal to number of graph roots: {roots.__len__()}."
)
return root_nodes
# def _GetNodesCoordinates(node_coord: Tuple[str, ...]) -> list:
# """
# Input: nodes attributes -> Tuple('x1-y1-z1', 'x2-y2-z2', ...) .
# Output: coordinates -> List[Tuple(x1,y1,z1), Tuple(x2,y2,z2), ...]
# """
# coord = []
# for c in node_coord:
# coord.append(c)
#
# for node in range(len(node_coord)):
# coord_node = coord[node]
# pattern = r"\d+"
# coord_node = re_.findall(pattern, coord_node)
# coor = []
# for i in range(3):
# coor.append(int(coord_node[i]))
# coor = tuple(coor)
# coord[node] = coor
#
# return coord
FEATURE_SHEET_COLUMNS = [
"condition",
"duration",
"soma uid",
"coef_V_soma__V_convex_hull",
"coef_axes_ellips_b__a",
"coef_axes_ellips_c__a",
"spherical_angles_eva",
"spherical_angles_evb",
#
"N_nodes",
"N_ext",
"N_primary_ext",
"N_sec_ext",
"min_degree",
"mean_degree",
"median_degree",
"max_degree",
"std_degree",
#
"total_ext_length",
"min_length",
"mean_length",
"median_length",
"max_length",
"std_lengths",
"entropy_lengths",
"hist_lengths",
"min_thickness",
"mean_thickness",
"median_thickness",
"max_thickness",
"std_thickness",
"entropy_thickness",
"min_volume",
"mean_volume",
"median_volume",
"max_volume",
"std_volume",
"entropy_volume",
"min_curvature",
"max_curvature",
"mean_curvature",
"median_curvature",
"std_curvature",
"entropy_curvature",
"hist_curvature",
#
"total_ext_length_P",
"min_length_P",
"mean_length_P",
"median_length_P",
"max_length_P",
"std_lengths_P",
"entropy_lengths_P",
"hist_lengths_P",
"min_thickness_P",
"mean_thickness_P",
"median_thickness_P",
"max_thickness_P",
"std_thickness_P",
"entropy_thickness_P",
"min_volume_P",
"mean_volume_P",
"median_volume_P",
"max_volume_P",
"std_volume_P",
"entropy_volume_P",
"min_curvature_P",
"max_curvature_P",
"mean_curvature_P",
"median_curvature_P",
"std_curvature_P",
"entropy_curvature_P",
"hist_curvature_P",
#
"total_ext_length_S",
"min_length_S",
"mean_length_S",
"median_length_S",
"max_length_S",
"std_lengths_S",
"entropy_lengths_S",
"hist_lengths_S",
"min_thickness_S",
"mean_thickness_S",
"median_thickness_S",
"max_thickness_S",
"std_thickness_S",
"entropy_thickness_S",
"min_volume_S",
"mean_volume_S",
"median_volume_S",
"max_volume_S",
"std_volume_S",
"entropy_volume_S",
"min_curvature_S",
"max_curvature_S",
"mean_curvature_S",
"median_curvature_S",
"std_curvature_S",
"entropy_curvature_S",
"hist_curvature_S",
]
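# Naming convention, as used by the feature-extraction code below: columns with the
# _P suffix describe primary extensions (directly connected to the soma), and columns
# with the _S suffix describe secondary extensions.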
def ExtractFeaturesInDF(
name_file,
Condition,
Duration,
somas,
voxel_size_in_micron: array_t,
bins_length: array_t,
bins_curvature: array_t,
scale_map: array_t,
decimals: int = 4,
_=None, # condition
__=None, # duration
):
"""
Extract the features from somas and graphs.
Store the condition and duration of the image
if (condition is None) or (duration is None):
Condition = re_.findall(r"[A-Z]{3}", name_file)[0]
Duration = re_.findall(r"\dH", name_file)
Find our whether the duration is in hours or in weeks
if Duration.__len__() == 0:
Duration = re_.findall(r"\dW", name_file)[0]
else:
Duration = Duration[0]
Returns a pandas dataframe, without NaN.
"""
somas_features_dict = {} # Dict{soma 1: [features], soma 2: [features], ...}
(
min_degree,
mean_degree,
median_degree,
max_degree,
std_degree,
total_ext_length_S,
min_length_S,
mean_length_S,
median_length_S,
max_length_S,
std_lengths_S,
entropy_lengths_S,
hist_lengths_S,
min_thickness_S,
mean_thickness_S,
median_thickness_S,
max_thickness_S,
std_thickness_S,
entropy_thickness_S,
min_volume_S,
mean_volume_S,
median_volume_S,
max_volume_S,
std_volume_S,
entropy_volume_S,
min_curvature_S,
max_curvature_S,
mean_curvature_S,
median_curvature_S,
std_curvature_S,
entropy_curvature_S,
hist_curvature_S,
) = 32 * (None,)
for soma in somas:
# -- Soma features
# Axes of the best fitting ellipsoid: a > b > c
(
_,
_,
soma.axes_ellipsoid,
_,
spherical_coor,
_,
volume_convex_hull,
) = bf_.FindBestFittingEllipsoid3D(soma)
# These ratios give info about the shape of the soma, e.g. rather flat, rather potato-shaped, rather spherical...
if type(soma.axes_ellipsoid[0]) is str:
(
Coef_axes_ellips_b__a,
Coef_axes_ellips_c__a,
spherical_angles_eva,
spherical_angles_evb,
soma.volume_soma_micron,
Coef_V_soma__V_convex_hull,
) = 6 * (None,)
else:
Coef_axes_ellips_b__a = soma.axes_ellipsoid[0] / soma.axes_ellipsoid[2]
Coef_axes_ellips_c__a = soma.axes_ellipsoid[1] / soma.axes_ellipsoid[2]
# Spherical angles give the orientation of the somas in the 3D volume
spherical_angles_eva = (spherical_coor[0][1], spherical_coor[0][2])
spherical_angles_evb = (spherical_coor[1][1], spherical_coor[1][2])
# Volume of the soma in micron**3
soma.volume_soma_micron = brick.task.unit.ToMicron(
len(soma.sites[0]),
voxel_size_in_micron,
dimension=(0, 1, 2),
decimals=2,
)
# Calculates volume of soma's convex hull in voxel volume
# Take into account anisotropy of the 3D space ( volume = x * y * z with z > x=y)
volume_convex_hull = (
volume_convex_hull * voxel_size_in_micron[2] / voxel_size_in_micron[0]
)
# Volume of the soma / Volume of its convex hull gives the info about the convexity of the soma
# If close to 0, the soma has a lot of invaginations, if close to 1, it is smooth and convex
Coef_V_soma__V_convex_hull = len(soma.sites[0]) / round(
volume_convex_hull + 0.5
)
# -- Extension features
# Graph features
# number of nodes except the constructed ones from node soma to the roots
N_nodes = soma.skl_graph.n_nodes - len(soma.graph_roots)
# number of edges except the constructed ones from node soma to the roots
N_ext = soma.skl_graph.n_edges - len(soma.graph_roots)
# number of primary edges = linked to the soma except the constructed ones from node soma to the roots
N_primary_ext = len(soma.graph_roots)
# number of secondary edges = not linked to the soma.
N_sec_ext = N_ext - N_primary_ext
LOGGER.info(
f"Soma {soma.uid}\n"
f"N nodes = {N_nodes}\n"
f"N edges = {N_ext}\n"
f"N primary extensions = {N_primary_ext}\n"
f"N secondary extensions = {N_sec_ext}\n"
)
if N_primary_ext > 0:
ext_lengths = [_elm[-1] for _elm in soma.skl_graph.lengths]
for idx, length in enumerate(ext_lengths):
ext_lengths[idx] = brick.task.unit.ToMicron(
length, voxel_size_in_micron, decimals=decimals
)
total_ext_length = brick.task.unit.ToMicron(
soma.skl_graph.length, voxel_size_in_micron, decimals=decimals
)
(
min_length,
max_length,
median_length,
mean_length,
std_lengths,
hist_lengths,
entropy_lengths,
) = _Statistics_mMdashe(ext_lengths, bins=bins_length)
# hist_lengths = nmpy.histogram(ext_lengths, bins_length)[0]
min_length = brick.task.unit.ToMicron(
min_length, voxel_size_in_micron, decimals=decimals
)
mean_length = brick.task.unit.ToMicron(
mean_length, voxel_size_in_micron, decimals=decimals
)
median_length = brick.task.unit.ToMicron(
median_length, voxel_size_in_micron, decimals=decimals
)
max_length = brick.task.unit.ToMicron(
max_length, voxel_size_in_micron, decimals=decimals
)
# std_lengths = nmpy.std(ext_lengths)
# entropy_lengths = st_.entropy(hist_lengths)
# Find the thickness of the extensions
for *_, edge in soma.skl_graph.edges.data(SKL_GRAPH):
if edge is not None:
edge.widths = scale_map[edge.sites] * voxel_size_in_micron[1]
mean_widths = soma.skl_graph.edge_reduced_widths()
ext_thickness = nmpy.array(mean_widths) ** 2
(
min_thickness,
max_thickness,
median_thickness,
mean_thickness,
std_thickness,
_,
entropy_thickness,
) = _Statistics_mMdashe(ext_thickness)
# min_thickness = min(ext_thickness)
# mean_thickness = nmpy.mean(ext_thickness)
# median_thickness = nmpy.median(ext_thickness)
# max_thickness = max(ext_thickness)
# std_thickness = nmpy.std(ext_thickness)
# entropy_thickness = st_.entropy(nmpy.histogram(ext_thickness)[0])
# Find the volume of the extensions
ext_volume = nmpy.array(ext_lengths) * ext_thickness
(
min_volume,
max_volume,
median_volume,
mean_volume,
std_volume,
_,
entropy_volume,
) = _Statistics_mMdashe(ext_volume)
# min_volume = min(ext_volume)
# mean_volume = nmpy.mean(ext_volume)
# median_volume = nmpy.median(ext_volume)
# max_volume = max(ext_volume)
# std_volume = nmpy.std(ext_volume)
# entropy_volume = st_.entropy(nmpy.histogram(ext_volume)[0])
curvatures = soma.skl_graph.curvature_and_torsion()
(
min_curvature,
max_curvature,
median_curvature,
mean_curvature,
std_curvature,
hist_curvature,
entropy_curvature,
) = _Statistics_mMdashe(curvatures, bins=bins_curvature)
# hist_curvature = nmpy.histogram(curvatures, bins_curvature)[0]
# min_curvature = min(curvatures)
# max_curvature = max(curvatures)
# mean_curvature = nmpy.mean(curvatures)
# median_curvature = nmpy.median(curvatures)
# std_curvature = nmpy.std(curvatures)
# entropy_curvature = st_.entropy(hist_curvature)
# PRIMARY extensions
ext_lengths_P = list(soma.skl_graph.primary_edge_lengths(soma))
for idx, length in enumerate(ext_lengths_P):
ext_lengths_P[idx] = brick.task.unit.ToMicron(
length, voxel_size_in_micron, decimals=decimals
)
total_ext_length_P = sum(ext_lengths_P)
(
min_length_P,
max_length_P,
median_length_P,
mean_length_P,
std_lengths_P,
hist_lengths_P,
entropy_lengths_P,
) = _Statistics_mMdashe(ext_lengths_P, bins=bins_length)
# hist_lengths_P = nmpy.histogram(ext_lengths_P, bins_length)[0]
# min_length_P = min(ext_lengths_P)
# mean_length_P = nmpy.mean(ext_lengths_P)
# median_length_P = nmpy.median(ext_lengths_P)
# max_length_P = max(ext_lengths_P)
# std_lengths_P = nmpy.std(ext_lengths_P)
# entropy_lengths_P = st_.entropy(hist_lengths_P)
mean_widths_P = soma.skl_graph.P_edge_reduced_widths(soma)
ext_thickness_P = nmpy.array(mean_widths_P) ** 2
(
min_thickness_P,
max_thickness_P,
median_thickness_P,
mean_thickness_P,
std_thickness_P,
_,
entropy_thickness_P,
) = _Statistics_mMdashe(ext_thickness_P)
# min_thickness_P = min(ext_thickness_P)
# mean_thickness_P = nmpy.mean(ext_thickness_P)
# median_thickness_P = nmpy.median(ext_thickness_P)
# max_thickness_P = max(ext_thickness_P)
# std_thickness_P = nmpy.std(ext_thickness_P)
# entropy_thickness_P = st_.entropy(nmpy.histogram(ext_thickness_P)[0])
ext_volume_P = nmpy.array(ext_lengths_P) * ext_thickness_P
(
min_volume_P,
max_volume_P,
median_volume_P,
mean_volume_P,
std_volume_P,
_,
entropy_volume_P,
) = _Statistics_mMdashe(ext_volume_P)
# min_volume_P = min(ext_volume_P)
# mean_volume_P = nmpy.mean(ext_volume_P)
# median_volume_P = nmpy.median(ext_volume_P)
# max_volume_P = max(ext_volume_P)
# std_volume_P = nmpy.std(ext_volume_P)
# entropy_volume_P = st_.entropy(nmpy.histogram(ext_volume_P)[0])
curvatures_P = soma.skl_graph.P_curvature_and_torsion(soma)
(
min_curvature_P,
max_curvature_P,
median_curvature_P,
mean_curvature_P,
std_curvature_P,
hist_curvature_P,
entropy_curvature_P,
) = _Statistics_mMdashe(curvatures_P, bins=bins_curvature)
# hist_curvature_P = nmpy.histogram(curvatures_P, bins_curvature)[0]
# min_curvature_P = min(curvatures_P)
# max_curvature_P = max(curvatures_P)
# mean_curvature_P = nmpy.mean(curvatures_P)
# median_curvature_P = nmpy.median(curvatures_P)
# std_curvature_P = nmpy.std(curvatures_P)
# entropy_curvature_P = st_.entropy(hist_curvature_P)
# Secondary extensions
if N_sec_ext > 0:
# min, mean, median, max and standard deviation of the degrees of non-leaf nodes
min_degree = soma.skl_graph.min_degree_except_leaves_and_roots
mean_degree = soma.skl_graph.mean_degree_except_leaves_and_roots
median_degree = soma.skl_graph.median_degree_except_leaves_and_roots
max_degree = soma.skl_graph.max_degree_except_leaves_and_roots
std_degree = soma.skl_graph.std_degree_except_leaves_and_roots
# SECONDARY extensions length
ext_lengths_S = list(soma.skl_graph.secondary_edge_lengths(soma))
for idx, length in enumerate(ext_lengths_S):
ext_lengths_S[idx] = brick.task.unit.ToMicron(
length, voxel_size_in_micron, decimals=decimals
)
total_ext_length_S = sum(ext_lengths_S)
(
min_length_S,
max_length_S,
median_length_S,
mean_length_S,
std_lengths_S,
hist_lengths_S,
entropy_lengths_S,
) = _Statistics_mMdashe(ext_lengths_S, bins=bins_length)
# hist_lengths_S = nmpy.histogram(ext_lengths_S, bins_length)[0]
# min_length_S = min(ext_lengths_S)
# mean_length_S = nmpy.mean(ext_lengths_S)
# median_length_S = nmpy.median(ext_lengths_S)
# max_length_S = max(ext_lengths_S)
# std_lengths_S = nmpy.std(ext_lengths_S)
# entropy_lengths_S = st_.entropy(hist_lengths_S)
mean_widths_S = soma.skl_graph.S_edge_reduced_widths(soma)
ext_thickness_S = nmpy.array(mean_widths_S) ** 2
(
min_thickness_S,
max_thickness_S,
median_thickness_S,
mean_thickness_S,
std_thickness_S,
_,
entropy_thickness_S,
) = _Statistics_mMdashe(ext_thickness_S)
# min_thickness_S = min(ext_thickness_S)
# mean_thickness_S = nmpy.mean(ext_thickness_S)
# median_thickness_S = nmpy.median(ext_thickness_S)
# max_thickness_S = max(ext_thickness_S)
# std_thickness_S = nmpy.std(ext_thickness_S)
# entropy_thickness_S = st_.entropy(nmpy.histogram(ext_thickness_S)[0])
ext_volume_S = nmpy.array(ext_lengths_S) * ext_thickness_S
(
min_volume_S,
max_volume_S,
median_volume_S,
mean_volume_S,
std_volume_S,
_,
entropy_volume_S,
) = _Statistics_mMdashe(ext_volume_S)
# min_volume_S = min(ext_volume_S)
# mean_volume_S = nmpy.mean(ext_volume_S)
# median_volume_S = nmpy.median(ext_volume_S)
# max_volume_S = max(ext_volume_S)
# std_volume_S = nmpy.std(ext_volume_S)
# entropy_volume_S = st_.entropy(nmpy.histogram(ext_volume_S)[0])
curvatures_S = soma.skl_graph.S_curvature_and_torsion(soma)
(
min_curvature_S,
max_curvature_S,
median_curvature_S,
mean_curvature_S,
std_curvature_S,
hist_curvature_S,
entropy_curvature_S,
) = _Statistics_mMdashe(curvatures_S, bins=bins_curvature)
# hist_curvature_S = nmpy.histogram(curvatures_S, bins_curvature)[0]
# min_curvature_S = min(curvatures_S)
# max_curvature_S = max(curvatures_S)
# mean_curvature_S = nmpy.mean(curvatures_S)
# median_curvature_S = nmpy.median(curvatures_S)
# std_curvature_S = nmpy.std(curvatures_S)
# entropy_curvature_S = st_.entropy(hist_curvature_S)
# If there are no secondary extensions, assign default values to the corresponding parameters
if N_sec_ext == 0:
# min, mean, median, max and standard deviation of the degrees of non-leaf nodes
min_degree = 1
mean_degree = 1
median_degree = 1
max_degree = 1
std_degree = 0
total_ext_length_S = 0
min_length_S = 0
mean_length_S = 0
median_length_S = 0
max_length_S = 0
std_lengths_S = 0
entropy_lengths_S = 0
hist_lengths_S = 0
min_thickness_S = 0
mean_thickness_S = 0
median_thickness_S = 0
max_thickness_S = 0
std_thickness_S = 0
entropy_thickness_S = 0
min_volume_S = 0
mean_volume_S = 0
median_volume_S = 0
max_volume_S = 0
std_volume_S = 0
entropy_volume_S = 0
min_curvature_S = -1
max_curvature_S = -1
mean_curvature_S = -1
median_curvature_S = -1
std_curvature_S = 0
entropy_curvature_S = 0
hist_curvature_S = 0
else:
min_degree = 0
mean_degree = 0
median_degree = 0
max_degree = 0
std_degree = 0
total_ext_length = 0
min_length = 0
mean_length = 0
median_length = 0
max_length = 0
std_lengths = 0
entropy_lengths = 0
hist_lengths = 0
min_thickness = 0
mean_thickness = 0
median_thickness = 0
max_thickness = 0
std_thickness = 0
entropy_thickness = 0
min_volume = 0
mean_volume = 0
median_volume = 0
max_volume = 0
std_volume = 0
entropy_volume = 0
min_curvature = -1
max_curvature = -1
mean_curvature = -1
median_curvature = -1
std_curvature = 0
entropy_curvature = 0
hist_curvature = 0
total_ext_length_P = 0
min_length_P = 0
mean_length_P = 0
median_length_P = 0
max_length_P = 0
std_lengths_P = 0
entropy_lengths_P = 0
hist_lengths_P = 0
min_thickness_P = 0
mean_thickness_P = 0
median_thickness_P = 0
max_thickness_P = 0
std_thickness_P = 0
entropy_thickness_P = 0
min_volume_P = 0
mean_volume_P = 0
median_volume_P = 0
max_volume_P = 0
std_volume_P = 0
entropy_volume_P = 0
min_curvature_P = -1
max_curvature_P = -1
mean_curvature_P = -1
median_curvature_P = -1
std_curvature_P = 0
entropy_curvature_P = 0
hist_curvature_P = 0
total_ext_length_S = 0
min_length_S = 0
mean_length_S = 0
median_length_S = 0
max_length_S = 0
std_lengths_S = 0
entropy_lengths_S = 0
hist_lengths_S = 0
min_thickness_S = 0
mean_thickness_S = 0
median_thickness_S = 0
max_thickness_S = 0
std_thickness_S = 0
entropy_thickness_S = 0
min_volume_S = 0
mean_volume_S = 0
median_volume_S = 0
max_volume_S = 0
std_volume_S = 0
entropy_volume_S = 0
min_curvature_S = -1
max_curvature_S = -1
mean_curvature_S = -1
median_curvature_S = -1
std_curvature_S = 0
entropy_curvature_S = 0
hist_curvature_S = 0
# Create a dictionary with all the features for every soma
somas_features_dict[name_file + f" soma {soma.uid}"] = [
Condition,
Duration,
soma.uid,
Coef_V_soma__V_convex_hull,
Coef_axes_ellips_b__a,
Coef_axes_ellips_c__a,
spherical_angles_eva,
spherical_angles_evb,
N_nodes,
N_ext,
N_primary_ext,
N_sec_ext,
min_degree,
mean_degree,
median_degree,
max_degree,
std_degree,
#
total_ext_length,
min_length,
mean_length,
median_length,
max_length,
std_lengths,
entropy_lengths,
hist_lengths,
min_thickness,
mean_thickness,
median_thickness,
max_thickness,
std_thickness,
entropy_thickness,
min_volume,
mean_volume,
median_volume,
max_volume,
std_volume,
entropy_volume,
min_curvature,
max_curvature,
mean_curvature,
median_curvature,
std_curvature,
entropy_curvature,
hist_curvature,
#
total_ext_length_P,
min_length_P,
mean_length_P,
median_length_P,
max_length_P,
std_lengths_P,
entropy_lengths_P,
hist_lengths_P,
min_thickness_P,
mean_thickness_P,
median_thickness_P,
max_thickness_P,
std_thickness_P,
entropy_thickness_P,
min_volume_P,
mean_volume_P,
median_volume_P,
max_volume_P,
std_volume_P,
entropy_volume_P,
min_curvature_P,
max_curvature_P,
mean_curvature_P,
median_curvature_P,
std_curvature_P,
entropy_curvature_P,
hist_curvature_P,
#
total_ext_length_S,
min_length_S,
mean_length_S,
median_length_S,
max_length_S,
std_lengths_S,
entropy_lengths_S,
hist_lengths_S,
min_thickness_S,
mean_thickness_S,
median_thickness_S,
max_thickness_S,
std_thickness_S,
entropy_thickness_S,
min_volume_S,
mean_volume_S,
median_volume_S,
max_volume_S,
std_volume_S,
entropy_volume_S,
min_curvature_S,
max_curvature_S,
mean_curvature_S,
median_curvature_S,
std_curvature_S,
entropy_curvature_S,
hist_curvature_S,
]
features_df = pd_.DataFrame.from_dict(
somas_features_dict, orient="index", columns=FEATURE_SHEET_COLUMNS
)
return features_df
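# Minimal usage sketch of the returned DataFrame (assumed caller-side code, not part of this
# module): it is indexed by "<file name> soma <uid>" and can be saved with, e.g.,
# features_df.to_csv(output_path / "features.csv"), where `output_path` is a hypothetical
# pathlib.Path chosen by the caller.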
_Median = lambda _msr: nmpy.median(_msr).item()
_Mean = lambda _msr: nmpy.mean(_msr).item()
_Std = lambda _msr: nmpy.std(_msr).item()
def _Statistics_mMdashe(measures: Sequence, /, *, bins=None) -> tuple:
"""Return min, max, median, mean, std, the histogram counts (as a tuple), and the histogram entropy."""
if bins is None:
bins = {}
else:
bins = {"bins": bins}
histogram = nmpy.histogram(measures, **bins)[0]
histogram = tuple(histogram.tolist())
entropy = st_.entropy(histogram).item()
output = [
_StatisticOf(measures) for _StatisticOf in (min, max, _Median, _Mean, _Std)
]
output.extend((histogram, entropy))
return tuple(output)
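# Minimal sketch of the helper's contract (illustrative call, assuming st_ is scipy.stats):
#   mn, mx, med, avg, std, hist, ent = _Statistics_mMdashe([1.0, 2.0, 2.0, 5.0], bins=4)
# returns, in this order, the minimum, maximum, median, mean and standard deviation of the
# measures, the histogram counts as a tuple, and the Shannon entropy of those counts.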
@@ -34,7 +34,6 @@ from skimage import measure
def Ext_parameters(ext_label, skeleton_label):
ext_p1 = []
ext_p2 = []
ext_p3 = []
@@ -29,7 +29,7 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import numpy as np_
import numpy as nmpy
import skimage.measure as ms_
import sklearn.decomposition as dc_
@@ -54,34 +54,34 @@ def Soma_parameters(image, final_soma_label):
# compute eccentricity using PCA
pca = dc_.PCA()
for l in np_.unique(soma_label):
for l in nmpy.unique(soma_label):
if l != 0:
region = np_.zeros([w, n, m])
loc = np_.where(soma_label == l)
region = nmpy.zeros([w, n, m])
loc = nmpy.where(soma_label == l)
region[loc] = image[loc]
loc = np_.asarray(loc)
loc = nmpy.asarray(loc)
# Estimation: compute the principal components of each soma
C = pca.fit(loc).transform(loc)
egv1 = (C[0][:]).max()
egv2 = (C[1][:]).max()
egv3 = (C[2][:]).max()
egv_vect = np_.array([egv1, egv2, egv3])
egv_max = egv_vect.max()
egv_vect = nmpy.array([egv1, egv2, egv3])
egv_max = nmpy.amax(egv_vect)
ration1 = egv1 / egv_max
ration2 = egv2 / egv_max
ration3 = egv3 / egv_max
loc = np_.where(egv_vect == egv_max)
vect_ration = np_.array([ration1, ration2, ration3])
ration = np_.delete(vect_ration, loc)
loc = nmpy.where(egv_vect == egv_max)
vect_ration = nmpy.array([ration1, ration2, ration3])
ration = nmpy.delete(vect_ration, loc)
soma_p5.append(ration)
soma_p1 = np_.asarray(soma_p1)
soma_p2 = np_.asarray(soma_p2)
soma_p3 = np_.asarray(soma_p3)
soma_p4 = np_.asarray(soma_p4)
soma_p5 = np_.asarray(soma_p5)
soma_p1 = nmpy.asarray(soma_p1)
soma_p2 = nmpy.asarray(soma_p2)
soma_p3 = nmpy.asarray(soma_p3)
soma_p4 = nmpy.asarray(soma_p4)
soma_p5 = nmpy.asarray(soma_p5)
parameters_soma = np_.array(
parameters_soma = nmpy.array(
[soma_p1, soma_p2, soma_p3, soma_p4, soma_p5[:, 0], soma_p5[:, 1]]
)
parameters_soma = parameters_soma.T
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2018)
# Contributor(s): Eric Debreuve (since 2019), Morgane Nadal (2020)
#
# eric.debreuve@cnrs.fr
#
@@ -29,31 +29,20 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
from string import ascii_uppercase as ASCII_UPPERCASE_LIST
from string import digits as DIGIT_LIST
from typing import Union
from typing import Sequence
from logger_36 import LOGGER
coord_sep_c = "-"
alphabet_c = ASCII_UPPERCASE_LIST + DIGIT_LIST
from brick.type.extension import extension_t
def EncodedNumber(number: Union[int, str], base: int = 0) -> str:
def PrintConnectedExtensions(extensions: Sequence[extension_t]) -> None:
#
if isinstance(number, str):
number = int(number)
elif isinstance(number, int):
pass
else:
raise TypeError(f"{number}: Invalid type; Actual={type(number).__name__}, Expected=int, str")
if base == 0:
base = alphabet_c.__len__()
residual = number
encoding = []
while residual:
residual, remaining = divmod(residual, base)
encoding.append(alphabet_c[remaining])
return "".join(reversed(encoding))
ext_uids = tuple(
extension.uid for extension in extensions if extension.soma_uid is not None
)
LOGGER.info(
f" Connected Ext = {ext_uids.__len__()}"
f"/{extensions.__len__()}\n"
f" {ext_uids}"
)
@@ -29,28 +29,16 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import brick.processing.plot as ot_
from brick.component.extension import extension_t
from brick.component.soma import soma_t
from typing import Sequence, Tuple
import matplotlib as mpl_
import matplotlib.pyplot as pl_
mpl_.use('TkAgg')
import brick.io.screen.plot as ot_
from brick.type.extension import extension_t
from brick.type.soma import soma_t
def PrintConnectedExtensions(extensions: Sequence[extension_t]) -> None:
#
ext_uids = tuple(
extension.uid for extension in extensions if extension.soma_uid is not None
)
print(
f" Connected Ext = {ext_uids.__len__()}"
f"/{extensions.__len__()}\n"
f" {ext_uids}"
)
# mpl_.use('TkAgg')
def PlotSomas(somas: Sequence[soma_t], som_nfo: dict, axes: dict) -> None:
@@ -29,28 +29,36 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import brick.processing.dijkstra_1_to_n as dk_
from brick.component.extension import extension_t
from brick.component.soma import soma_t
from brick.general.type import array_t, py_array_picker_h
from typing import Any, Optional, Sequence, Tuple, Union
import dijkstra_img as dk_
import matplotlib.pyplot as pl_
import mpl_toolkits.mplot3d as p3_
import numpy as np_
import numpy as nmpy
import skimage.measure as ms_
def MaximumIntensityProjectionZ(img: array_t, cmap: str ='tab20', axis: int = 0, output_image_file_name: str = None) -> None:
""" Maximum Image Projection on the Z axis. """
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from brick.type.base import array_t, py_array_picker_h
from brick.type.extension import extension_t
from brick.type.soma import soma_t
def MaximumIntensityProjectionZ(
img: array_t,
cmap: str = "tab20",
axis: int = 0,
block=True,
output_image_file_name: str = None,
) -> None:
"""Maximum Image Projection on the Z axis."""
#
xy = np_.amax(img, axis=axis)
xy = nmpy.amax(img, axis=axis)
pl_.imshow(xy, cmap=cmap)
pl_.show(block=True)
pl_.show(block=block)
if output_image_file_name is not None:
pl_.imsave(output_image_file_name, xy, cmap=cmap)
print('Image saved in', output_image_file_name)
pl_.close()
print("Image saved in", output_image_file_name)
colors_c = ("g", "r", "b", "m", "c", "y")
@@ -58,7 +66,7 @@ mc_precision_c = 5 # mc=marching cubes
def PlotLMap(
lmp: array_t, axes=None, labels: Union[int, Tuple[int, ...]] = None
) -> Optional[Any]:
#
depth_factor, depth_limit = __DepthFactorAndLimit__(lmp.shape)
@@ -69,16 +77,16 @@ def PlotLMap(
if isinstance(labels, int):
labels = (labels,)
elif labels is None:
labels = range(1, lmp.max() + 1)
labels = range(1, nmpy.amax(lmp).item() + 1)
for label in labels:
try:
vertices, faces, _, _ = ms_.marching_cubes_lewiner(
lmp == label, 0.5, step_size=mc_precision_c
vertices, faces, *_ = ms_.marching_cubes_lewiner(
lmp == label, level=0.5, step_size=mc_precision_c
)
vertices[:, 0] *= depth_factor
triangles = vertices[faces]
mesh = p3_.art3d.Poly3DCollection(triangles)
mesh = Poly3DCollection(triangles)
mesh.set_facecolor(colors_c[label % colors_c.__len__()])
axes.add_collection3d(mesh)
except RuntimeError as exc:
@@ -91,7 +99,7 @@ def PlotLMap(
def PlotConnection(
connection: py_array_picker_h, soma_uid: int, shape: Sequence[int], axes=None
) -> Optional[Any]:
#
depth_factor, depth_limit = __DepthFactorAndLimit__(shape)
@@ -102,7 +110,7 @@ def PlotConnection(
# TODO This test is put here but could be move outside this function
if connection is not None:
axes.plot(
depth_factor * np_.array(connection[0]),
depth_factor * nmpy.array(connection[0]),
*connection[1:],
colors_c[soma_uid % colors_c.__len__()],
)
@@ -114,9 +122,9 @@ def PlotExtensions(
def PlotExtensions(
extensions: Union[extension_t, Sequence[extension_t]],
shape: Sequence[int],
axes=None,
) -> Optional[Any]:
#
depth_factor, depth_limit = __DepthFactorAndLimit__(shape)
@@ -124,26 +132,25 @@ def PlotExtensions(
if new_axes:
_, axes = __FigAndAxes__(shape, depth_limit)
costs = np_.empty(shape, dtype=np_.float32)
costs = nmpy.empty(shape, dtype=nmpy.float32)
if isinstance(extensions, extension_t):
extensions = (extensions,)
for extension in extensions:
# Remove voxels that can be removed w/o breaking connectivity
costs.fill(np_.inf)
costs.fill(nmpy.inf)
costs[extension.sites] = 1
for src_ep_idx in range(extension.end_points_as_array.shape[1] - 1):
src_point = tuple(extension.end_points_as_array[:, src_ep_idx].tolist())
for tgt_ep_idx in range(
src_ep_idx + 1, extension.end_points_as_array.shape[1]
):
tgt_point = tuple(extension.end_points_as_array[:, tgt_ep_idx].tolist())
sites, _ = dk_.DijkstraShortestPath(
costs,
src_point,
tgt_point,
limit_to_sphere=False,
constrain_direction=False,
should_constrain_steps=False,
)
sites = tuple(zip(*sites))
@@ -174,12 +181,12 @@ def PlotSomaWithExtensions(soma: soma_t, soma_lmp: array_t, axes=None) -> Option
PlotLMap(soma_lmp, labels=soma.uid, axes=axes)
for connection_path in filter(
lambda path: path is not None, soma.connection_path.values()
):
PlotConnection(connection_path, soma.uid, shape, axes=axes)
for extension in soma.Extensions():
for connection_path in filter(
lambda path: path is not None, extension.connection_path.values()
):
PlotConnection(connection_path, soma.uid, shape, axes=axes)
PlotExtensions(extension, shape, axes=axes)
@@ -201,7 +208,7 @@ def __DepthFactorAndLimit__(shape: Sequence[int]) -> Tuple[float, int]:
def __FigAndAxes__(shape: Sequence[int], depth_limit: float) -> Tuple[Any, Any]:
#
fig = pl_.figure()
axes = fig.add_subplot(111, projection=p3_.Axes3D.name)
axes = fig.add_subplot(111, projection=Axes3D.name)
axes.set_xlabel(f"depth: {shape[0]}")
axes.set_ylabel(f"row: {shape[1]}")
@@ -30,39 +30,50 @@
# knowledge of the CeCILL license and that you accept its terms.
import sys as sy_
from PySide2.QtGui import QImage, QPixmap
from PySide2.QtWidgets import *
import skimage.io as io_
import matplotlib as mpl_
import matplotlib.pyplot as pl_
import numpy as np_
import skimage.io as io_
from PySide6.QtWidgets import *
from brick.general.type import array_t
from brick.type.base import array_t
mpl_.use('TkAgg')
channel = 'G'
data_path = '././data/DIO_6H_6_1.70bis_2.2_3.tif'
image = io_.imread(data_path)
mpl_.use("TkAgg")
CHANNEL = "G"
DATA_PATH = "././data/DIO_6H_6_1.70bis_2.2_3.tif"
IMAGE = io_.imread(DATA_PATH)
# image = image[:,:,:,1]
class image_verification(QWidget):
def __init__(self, image: array_t = None, channel: str = None, parent=None):
super(image_verification, self).__init__(parent)
self.channel = channel
self.image = image
# Create widgets
# --Text
self.text0 = QLabel('The image input must be not constant.')
self.text1 = QLabel('The image has only one color channel.')
self.text2 = QLabel(f"The image has only 3 dimensions. However, a value for the 'channel' parameter"
f"is specified: {channel}. Continue with this image?")
self.text3 = QLabel(f"The image has multiple color channels. The channel {channel} is specified"
f" in the parameters. Continue with the resulting image?")
self.text4 = QLabel('The image has multiple color channels. Error in the value of the parameter channel.')
self.text5 = QLabel(f'The image dimensions are not correct: {image.ndim}, instead of 3 or 4.')
self.text0 = QLabel(parent=None, text="The input image must not be constant.")
self.text1 = QLabel(parent=None, text="The image has only one color channel.")
self.text2 = QLabel(
parent=None,
text=f"The image has only 3 dimensions. However, a value for the 'channel' parameter"
f"is specified: {channel}. Continue with this image?",
)
self.text3 = QLabel(
parent=None,
text=f"The image has multiple color channels. The channel {channel} is specified"
f" in the parameters. Continue with the resulting image?",
)
self.text4 = QLabel(
parent=None,
text="The image has multiple color channels. Error in the value of the parameter channel.",
)
self.text5 = QLabel(
parent=None,
text=f"The image dimensions are not correct: {image.ndim}, instead of 3 or 4.",
)
# --Buttons
self.button_quit = QPushButton("Quit and modify the parameters.")
self.button_continue = QPushButton("Continue.")
@@ -79,7 +90,8 @@ class image_verification(QWidget):
def Continue(self):
self.close()
def Quit(self):
@staticmethod
def Quit():
sy_.exit(0)
def ReturnImage(self):
@@ -87,7 +99,7 @@ class image_verification(QWidget):
# def PlotImage(self, image: array_t) -> None :
# im = image.copy()
# im = np_.rint(im).astype(np_.uint8)
# im = nmpy.rint(im).astype(nmpy.uint8)
# im = QImage(im, im.shape[1], im.shape[0], im.shape[1] * 3, QImage.Format_RGB888)
# pix = QPixmap.fromImage(im)
# img = QLabel(self)
@@ -96,7 +108,7 @@ class image_verification(QWidget):
def ImVerif(self) -> array_t:
# The image must not be constant
if self.image.max() == self.image.min():
value_error = 'The image input must be not constant.'
value_error = "The image input must be not constant."
print(value_error)
layout = QVBoxLayout()
@@ -106,15 +118,17 @@ class image_verification(QWidget):
# Verification of the dimension of the image and its coherence with the parameters (channel)
elif self.image.ndim == 3:
print('The image has only one color channel.')
pl_.imshow(self.image[10, :, :], cmap='gray')
print("The image has only one color channel.")
pl_.imshow(self.image[10, :, :], cmap="gray")
pl_.show(block=True)
text = self.text1
if channel is not None:
value_error = f'The image has only 3 dimensions. However, a value for the "channel" parameter is ' \
f'specified : {channel}. Give the channel the value None? '
if CHANNEL is not None:
value_error = (
f'The image has only 3 dimensions. However, a value for the "channel" parameter is '
f"specified : {CHANNEL}. Give the channel the value None? "
)
print(value_error)
text = self.text2
@@ -126,20 +140,22 @@ class image_verification(QWidget):
self.setLayout(layout)
elif self.image.ndim == 4:
if channel == 'R' or channel == 'G' or channel == 'B':
if CHANNEL == "R" or CHANNEL == "G" or CHANNEL == "B":
# Not written as --CHANNEL in 'RGB'-- because that would also match, e.g., CHANNEL = 'RG'.
value_error = f'The image has multiple color channels. The channel {channel} is specified in the ' \
f'parameters. '
value_error = (
f"The image has multiple color channels. The channel {CHANNEL} is specified in the "
f"parameters. "
)
print(value_error)
text = self.text3
self.image = self.image[:, :, :, 'RGB'.find(channel)]
pl_.imshow(self.image[10, :, :], cmap='gray')
self.image = self.image[:, :, :, "RGB".find(CHANNEL)]
pl_.imshow(self.image[10, :, :], cmap="gray")
pl_.show(block=True)
# The obtained image must not be constant
if self.image.max() == self.image.min():
value_error = 'The image input must be not constant.'
value_error = "The image input must be not constant."
print(value_error)
text = self.text0
@@ -153,7 +169,9 @@ class image_verification(QWidget):
return self.image # TODO: How to return the image ??
else:
print('The image has multiple color channels. Error in the value of the parameter channel.')
print(
"The image has multiple color channels. Error in the value of the parameter channel."
)
layout = QVBoxLayout()
layout.addWidget(self.text4)
@@ -161,7 +179,9 @@ class image_verification(QWidget):
self.setLayout(layout)
elif self.image.ndim != 4 and self.image.ndim != 3:
print(f'The image dimensions are not correct: {self.image.ndim}, instead of 3 or 4.')
print(
f"The image dimensions are not correct: {self.image.ndim}, instead of 3 or 4."
)
layout = QVBoxLayout()
layout.addWidget(self.text5)
@@ -169,8 +189,8 @@ class image_verification(QWidget):
self.setLayout(layout)
if __name__ == '__main__':
app = QApplication(sy_.argv)
verif = image_verification(image, channel)
if __name__ == "__main__":
app = QApplication()
verif = image_verification(IMAGE, CHANNEL)
verif.show()
sy_.exit(app.exec_())
from __future__ import annotations
import tkinter as tknt
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import numpy as nmpy
from brick.io.screen.type.tk_and_pil import (
ColoredVersion,
LabelColors,
ScaledVersion,
image_record_t,
)
array_t = nmpy.ndarray
class mip_widget_t(tknt.Label):
""""""
attributes = (
"mip_records",
"data_mode", # "continuous", "discrete"
"color_mode", # "gray", "color"
"gray_offset",
"colormap",
"colors",
"static",
"probe_able",
"resizeable",
"mip_axis_wgt",
"selected_mip_axis",
"probe_info_wgt",
"companion_mip",
"parent",
)
mip_records: Sequence[image_record_t]
data_mode: str
color_mode: str
gray_offset: int
colormap: str
colors: Sequence[Union[float, array_t]]
static: bool
probe_able: bool
resizeable: bool
mip_axis_wgt: tknt.Menubutton
selected_mip_axis: tknt.IntVar
probe_info_wgt: Optional[tknt.Widget]
companion_mip: Optional[mip_widget_t]
parent: Union[tknt.Widget, tknt.Tk]
def __init__(
self,
parent: Union[tknt.Widget, tknt.Tk],
/,
):
""""""
super().__init__(parent, borderwidth=0, padx=0, pady=0)
for attribute in mip_widget_t.attributes:
setattr(self, attribute, None)
@classmethod
def NewForImage(
cls,
image: array_t,
data_mode: str,
color_mode: str,
/,
*,
mip_axis: int = 0, # Do not use negative values
with_mip_selector: bool = False,
should_pack_selector: bool = True,
with_companion: mip_widget_t = None,
gray_offset: int = 0,
colormap: str = "viridis",
static: bool = False,
probe_able: bool = True,
resizeable: bool = True,
parent: Union[tknt.Widget, tknt.Tk] = None,
) -> mip_widget_t:
""""""
instance = cls(parent)
# Do not comment out; Used in eval(attribute) below
mip_records, colors = _AllMIPImages(
image,
data_mode,
color_mode,
gray_offset,
colormap,
probe_able,
resizeable,
parent,
)
if static and (with_mip_selector or (with_companion is not None)):
raise ValueError(
"MIP widget with MIP selector or companion cannot be static"
)
if with_mip_selector and (with_companion is not None):
raise ValueError(
"MIP widget cannot have both a MIP selector and a companion"
)
elif with_mip_selector:
if should_pack_selector:
selector_parent = instance
else:
selector_parent = parent
# Do not comment out; Used in eval(attribute)
mip_axis_wgt, selected_mip_axis = _MIPAxisSelectorWidget(
mip_axis,
instance.Update,
image.shape,
selector_parent,
)
elif with_companion is not None:
selected_mip_axis = with_companion.selected_mip_axis
_SetMIPAxisSelectorActions(
selected_mip_axis,
(instance.Update, with_companion.Update),
)
else:
raise ValueError(
"MIP widget must have either a MIP selector or a companion"
)
if with_mip_selector and should_pack_selector:
raise NotImplementedError("Packing MIP selector not currently implemented")
if resizeable:
# Note: Binding cannot be done before super init
instance.bind("<Configure>", instance._OnResize)
# --- Saving required variables as object attributes
for attribute in cls.attributes:
try:
value = eval(attribute)
except NameError:
value = None
if value is not None:
setattr(instance, attribute, value)
instance.Update()
return instance
def UpdateImage(self, image: array_t, /) -> None:
""""""
if self.static:
raise RuntimeError("Requesting the update of a static mip")
self.mip_records, self.colors = _AllMIPImages(
image,
self.data_mode,
self.color_mode,
self.gray_offset,
self.colormap,
self.probe_able,
self.resizeable,
self.parent,
)
self.Update()
def Update(self, *_, **__) -> None:
""""""
if self.selected_mip_axis is None:
return
mip_record = self.mip_records[self.selected_mip_axis.get()]
self.configure(image=mip_record.tk_version)
def AddProbe(
self, probe_info_wgt: tknt.Widget, /, *, for_event: str = "<Motion>"
) -> None:
"""
probe_info_wgt: Must have a text attribute updatable through the configure(text=...) method.
The probe info widget could have been a parameter of the widget constructor. However, this
2-step approach was chosen to avoid requiring that the probe info widget be created before the MIP widget.
"""
if not self.probe_able:
raise ValueError(
'Adding a probe to a MIP widget instantiated with "probe_able" to False'
)
self.probe_info_wgt = probe_info_wgt
self.bind(for_event, self._DisplayInfo)
def _DisplayInfo(self, event: tknt.EventType.Motion, /) -> None:
""""""
value, row, col = self.ValueAndIndicesUnderPointer(event.y, event.x)
self.probe_info_wgt.configure(text=f"Label: {value} @ R={row}xC={col}")
def _OnResize(self, event: tknt.EventType.Configure, /) -> None:
""""""
mip_record = self.mip_records[self.selected_mip_axis.get()]
mip_record.Resize(event.width, event.height)
self.configure(image=mip_record.tk_version)
def ValueAndIndicesUnderPointer(
self, pointer_row: int, pointer_col: int, /
) -> Tuple[Union[int, float], int, int]:
""""""
mip_record = self.mip_records[self.selected_mip_axis.get()]
np_version = mip_record.np_version
shape = np_version.shape
row = int(round(shape[0] * pointer_row / self.winfo_height()))
col = int(round(shape[1] * pointer_col / self.winfo_width()))
try:
value = np_version[row, col]
except IndexError:
# If margins/paddings/borders are all set to zero, this should not happen
value = 0
return value, row, col
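# Minimal usage sketch for mip_widget_t (assumed caller-side code; `volume` is a hypothetical
# 3-D numpy array and the widget names are illustrative):
#   root = tknt.Tk()
#   mip = mip_widget_t.NewForImage(
#       volume, "continuous", "gray",
#       with_mip_selector=True, should_pack_selector=False, parent=root,
#   )
#   info = tknt.Label(root, text="")
#   mip.AddProbe(info)  # info is updated via configure(text=...) on mouse motion
#   mip.mip_axis_wgt.grid(row=0, column=0)
#   mip.grid(row=1, column=0, sticky=tknt.W + tknt.E + tknt.N + tknt.S)
#   info.grid(row=2, column=0)
#   root.mainloop()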
def _MIPAxisSelectorWidget(
current_axis: int,
Action: Union[Callable, Sequence[Callable]],
shape: Sequence[int],
parent: Union[tknt.Widget, tknt.Tk],
/,
) -> Tuple[tknt.Menubutton, tknt.IntVar]:
""""""
title = f"MIP Axis [{','.join(str(_lgt) for _lgt in shape)}]"
selector = tknt.Menubutton(parent, text=title, relief="raised")
selected_value = tknt.IntVar()
selected_value.set(current_axis)
menu = tknt.Menu(selector, tearoff=False)
entries = ("First dim", "Second dim", "Third dim")
for idx, entry in enumerate(entries):
menu.add_radiobutton(label=entry, value=idx, variable=selected_value)
menu.invoke(selected_value.get())
# Set action only after calling invoke to avoid redundant call at window creation
_SetMIPAxisSelectorActions(selected_value, Action)
selector["menu"] = menu
return selector, selected_value
def _SetMIPAxisSelectorActions(
mip_selector_variable: tknt.IntVar,
Action: Union[Callable, Sequence[Callable]],
) -> None:
""""""
if isinstance(Action, Callable):
Actions = (Action,)
else:
Actions = Action
def Callback(prm1: str, prm2: str, prm3: str, /) -> Any:
#
for OneAction in Actions:
OneAction(prm1, prm2, prm3)
mip_selector_variable.trace_add("write", Callback)
def _AllMIPImages(
image: array_t,
data_mode: str,
color_mode: str,
gray_offset: int,
colormap: str,
probe_able: bool,
resizeable: bool,
parent: Union[tknt.Widget, tknt.Tk],
/,
) -> Tuple[Sequence[image_record_t], Sequence[Union[float, array_t]]]:
""""""
if data_mode == "continuous":
colors = None
else:
colors = LabelColors(
image, color_mode, gray_offset=gray_offset, colormap=colormap
)
mip_records = [
_MIPImages(
image,
_axs,
colors,
parent,
)
for _axs in range(3)
]
for record in mip_records:
record.Optimize(probe_able, resizeable)
return mip_records, colors
def _MIPImages(
image: array_t,
mip_axis: int,
colors: Optional[Sequence[Union[float, array_t]]],
parent: Union[tknt.Widget, tknt.Tk],
/,
) -> image_record_t:
""""""
mip = nmpy.amax(image, axis=mip_axis)
if colors is None:
min_value = nmpy.amin(mip).item()
max_value = nmpy.amax(mip).item()
display_version = (255.0 / (max_value - min_value)) * (mip - min_value)
display_version = nmpy.around(display_version).astype(nmpy.uint8)
elif isinstance(colors[0], float):
display_version = ScaledVersion(mip, colors)
else:
display_version = ColoredVersion(mip, colors)
output = image_record_t(mip, display_version, parent)
return output
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2019)
#
# eric.debreuve@cnrs.fr
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
from __future__ import annotations
import tkinter as tknt
from typing import Optional, Sequence
import numpy as nmpy
import skimage.segmentation as sisg
from brick.io.screen.type.mip_widget import mip_widget_t
from brick.io.screen.type.three_d_widget import three_d_widget_t
array_t = nmpy.ndarray
STATIC_ROW_MIN_HEIGHT = 30
INITIAL_RELATIVE_ISOVALUE = 0.6
class soma_validation_window_t:
""""""
__slots__ = (
"gfp",
"lmap",
"main_window",
"three_d_selector",
"gfp_wgt",
"gfp_3d_wgt",
"lmap_wgt",
"lmap_3d_wgt",
"cursor_nfo",
"invisible_widgets",
)
attributes = __slots__
gfp: array_t
lmap: array_t
main_window: tknt.Tk
three_d_selector: tknt.Checkbutton
gfp_wgt: mip_widget_t
gfp_3d_wgt: Optional[three_d_widget_t]
lmap_wgt: mip_widget_t
lmap_3d_wgt: Optional[three_d_widget_t]
cursor_nfo: tknt.Label
invisible_widgets: Sequence
def __init__(
self,
gfp: array_t,
lmap: array_t,
/,
*,
mip_axis: int = -1,
gray_offset: int = 0,
colormap: str = "viridis",
):
"""
colormap: "plasma" and "viridis" seem to be good options
"""
exec(f"{'='.join(soma_validation_window_t.attributes)} = None")
main_window = tknt.Tk()
# ---- Creation of widgets
if mip_axis < 0:
mip_axis = gfp.ndim + mip_axis
gfp_wgt = mip_widget_t.NewForImage(
gfp,
"continuous",
"gray",
mip_axis=mip_axis,
with_mip_selector=True,
should_pack_selector=False,
gray_offset=gray_offset,
probe_able=False,
parent=main_window,
)
lmap_wgt = mip_widget_t.NewForImage(
lmap,
"discrete",
"color",
mip_axis=mip_axis,
with_companion=gfp_wgt,
colormap=colormap,
parent=main_window,
)
state_variable = tknt.IntVar()
Toggle3D = lambda *args, **kwargs: self._Toggle3D(state_variable)
three_d_selector = tknt.Checkbutton(
main_window, text="3D View", variable=state_variable, command=Toggle3D
)
cursor_nfo = tknt.Label(main_window, text="")
done_button = tknt.Button(main_window, text="Done", command=main_window.quit)
# --- Event management
lmap_wgt.AddProbe(cursor_nfo)
lmap_wgt.bind("<Button-1>", self._DeleteSoma)
# --- Widget placement in grid
next_available_row = 0
gfp_wgt.mip_axis_wgt.grid(row=next_available_row, column=0)
three_d_selector.grid(row=next_available_row, column=1)
next_available_row += 1
# sticky=... solves the super-slow-to-resize issue!
gfp_wgt.grid(
row=next_available_row, column=0, sticky=tknt.W + tknt.E + tknt.N + tknt.S
)
lmap_wgt.grid(
row=next_available_row, column=1, sticky=tknt.W + tknt.E + tknt.N + tknt.S
)
# Leave 2 rows free for isovalue slider and smoothing width slider
next_available_row += 3
cursor_nfo.grid(row=next_available_row, column=0)
done_button.grid(row=next_available_row, column=1)
next_available_row += 1
# --- Window resize management
n_grid_cols, n_grid_rows = main_window.grid_size()
for row in range(n_grid_rows):
if row == 1:
main_window.rowconfigure(1, weight=10)
else:
main_window.rowconfigure(row, weight=1, minsize=STATIC_ROW_MIN_HEIGHT)
for col in range(n_grid_cols):
main_window.columnconfigure(col, weight=1)
# --- Saving required variables as object attributes
for attribute in soma_validation_window_t.attributes:
setattr(self, attribute, eval(attribute))
self.invisible_widgets = ()
def LaunchValidation(self) -> tuple[array_t, int]:
""""""
self.main_window.mainloop()
self.main_window.destroy()
relabeled, *_ = sisg.relabel_sequential(self.lmap)
return relabeled, nmpy.amax(relabeled).item()
def _DeleteSoma(self, event: tknt.EventType.ButtonPress, /) -> None:
""""""
label, *_ = self.lmap_wgt.ValueAndIndicesUnderPointer(event.y, event.x)
if label > 0:
lmap = self.lmap
where_label = lmap == label
if nmpy.all(nmpy.logical_or(lmap == 0, where_label)):
return
lmap[where_label] = 0
self.lmap_wgt.UpdateImage(lmap)
if self.lmap_3d_wgt is not None:
self.lmap_3d_wgt.UpdateImage(lmap, colors=self.lmap_wgt.colors)
def _Toggle3D(self, state: tknt.IntVar, /) -> None:
""""""
three_d_state = state.get()
if three_d_state == 0:
soon_invisible_widgets = (
self.gfp_3d_wgt,
self.lmap_3d_wgt,
self.gfp_3d_wgt.isolevel_wgt,
self.gfp_3d_wgt.smoothing_width_wgt,
)
else:
soon_invisible_widgets = (
self.gfp_wgt,
self.lmap_wgt,
self.gfp_wgt.mip_axis_wgt,
)
for widget in soon_invisible_widgets:
widget.grid_remove()
if self.invisible_widgets.__len__() > 0:
for widget in self.invisible_widgets:
widget.grid()
else:
mip_axis = self.gfp_wgt.selected_mip_axis.get()
gfp_3d_wgt = three_d_widget_t.NewForImage(
self.gfp,
"continuous",
None,
self.main_window,
mip_axis=mip_axis,
should_pack_slider=False,
initial_relative_isovalue=INITIAL_RELATIVE_ISOVALUE,
)
lmap_3d_wgt = three_d_widget_t.NewForImage(
self.lmap,
"discrete",
self.lmap_wgt.colors,
self.main_window,
mip_axis=mip_axis,
)
gfp_3d_wgt.AddCompanionWidget(lmap_3d_wgt)
lmap_3d_wgt.AddCompanionWidget(gfp_3d_wgt)
gfp_3d_wgt.grid(row=1, column=0, sticky=tknt.W + tknt.E + tknt.N + tknt.S)
lmap_3d_wgt.grid(row=1, column=1, sticky=tknt.W + tknt.E + tknt.N + tknt.S)
gfp_3d_wgt.isolevel_wgt.grid(
row=2, column=0, sticky=tknt.W + tknt.E + tknt.N + tknt.S
)
gfp_3d_wgt.smoothing_width_wgt.grid(
row=3, column=0, sticky=tknt.W + tknt.E + tknt.N + tknt.S
)
self.gfp_3d_wgt = gfp_3d_wgt
self.lmap_3d_wgt = lmap_3d_wgt
self.invisible_widgets = soon_invisible_widgets
if three_d_state == 0:
self.cursor_nfo.configure(text="")
else:
self.cursor_nfo.configure(text="No MIP info in 3-D mode")
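# Minimal usage sketch (assumed caller-side code; `gfp` is the raw 3-D intensity volume and
# `lmap` the corresponding soma label map, both numpy arrays):
#   window = soma_validation_window_t(gfp, lmap, mip_axis=0)
#   lmap, n_somas = window.LaunchValidation()  # blocks until "Done" is clicked
# Somas clicked in the label-map MIP view are deleted (unless only one soma remains); the
# returned map is relabeled sequentially and n_somas is its new maximum label.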
from typing import Optional, Tuple
import numpy as nmpy
from scipy import ndimage as spim
from skimage import filters as fltr
array_t = nmpy.ndarray
class image_record_t:
""""""
__slots__ = (
"original",
"resized",
"smoothed",
"mode", # "continuous", "discrete"
"zoom_factors",
"smoothing_width",
)
attributes = __slots__
original: array_t
resized: array_t
smoothed: array_t
mode: str
zoom_factors: Tuple[float, float, float]
smoothing_width: Optional[float]
IMAGE_DTYPE = nmpy.float32
MAX_LENGTH = 500
MIN_LENGTH_RATIO = 0.25
INITIAL_SMOOTHING_WIDTH = 1.0
defaults = {}
def __init__(
self,
image: array_t,
mode: str,
/,
):
""""""
exec(f"{'='.join(image_record_t.attributes)} = None")
for attribute in image_record_t.attributes:
if attribute in image_record_t.defaults:
setattr(self, attribute, image_record_t.defaults[attribute])
else:
setattr(self, attribute, eval(attribute))
self.Update(image, image_record_t.INITIAL_SMOOTHING_WIDTH)
def Update(self, image: array_t, smoothing_width: float, /) -> None:
""""""
self.smoothing_width = None # Ensures that new image will be smoothed even if smoothing width is the same
self.original = _Retyped(image)
self.SetResized()
self.SetSmoothed(smoothing_width)
def SetResized(self) -> None:
""""""
shape = self.original.shape
min_length = min(shape)
max_length = max(shape)
zoom_factors = (1.0, 1.0, 1.0)
if min_length < image_record_t.MIN_LENGTH_RATIO * max_length:
smallest_axis = shape.index(min_length)
zoom_factors = (
zoom_factors[:smallest_axis]
+ (image_record_t.MIN_LENGTH_RATIO * max_length / min_length,)
+ zoom_factors[(smallest_axis + 1) :]
)
if max_length > image_record_t.MAX_LENGTH:
scaling = image_record_t.MAX_LENGTH / max_length
zoom_factors = tuple(scaling * _zmf for _zmf in zoom_factors)
self.zoom_factors = zoom_factors
if any(_zmf != 1.0 for _zmf in zoom_factors):
if self.mode == "continuous":
order = 3
else:
order = 0
resized = spim.zoom(self.original, zoom_factors, order=order)
self.resized = _Retyped(resized)
else:
self.resized = self.original
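# Worked example of the resizing rule above (illustrative shape, not from real data): for an
# original shape of (25, 1000, 800), min_length=25 < 0.25 * 1000, so the smallest axis gets a
# zoom factor of 0.25 * 1000 / 25 = 10; then, since 1000 > MAX_LENGTH=500, all factors are
# scaled by 500 / 1000 = 0.5, giving zoom factors (5.0, 0.5, 0.5) and a resized shape of about
# (125, 500, 400).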
def SetSmoothed(self, smoothing_width: float, /) -> None:
""""""
if self.mode == "continuous":
if smoothing_width != self.smoothing_width:
smoothed = fltr.gaussian(self.resized, smoothing_width)
self.smoothed = _Retyped(smoothed)
self.smoothing_width = smoothing_width
else:
self.smoothed = self.resized
def _Retyped(image: array_t) -> array_t:
""""""
return image.astype(image_record_t.IMAGE_DTYPE, copy=False)
from __future__ import annotations
import tkinter as tknt
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as nmpy
from matplotlib import pyplot as pypl
from matplotlib import ticker as tckr
from matplotlib.backends._backend_tk import NavigationToolbar2Tk as toolbar_widget_t
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as matplotlib_widget_t
from matplotlib.figure import Figure as figure_t
from mpl_toolkits.mplot3d import Axes3D as axes_3d_t
from skimage import measure as sims
from brick.io.screen.type.three_d_image import image_record_t
array_t = nmpy.ndarray
isolevels_t = Sequence[float]
STANDARD_VIEW_POINTS = (
{"elev": 10.0, "azim": 180.0},
{"elev": 10.0, "azim": -90.0},
{"elev": -90.0, "azim": -90.0},
)
DEFAULT_LABEL_STYLE = dict(boxstyle="round,pad=0.1", ec="k", lw=1)
class three_d_widget_t(tknt.Frame):
""""""
attributes = (
"image_record",
"isolevels",
"lower_isovalue_margin",
"upper_isovalue_margin",
"labels",
"colors",
"figure",
"axes",
"isolines",
"isosurfaces",
"linked_labels",
"annotation",
"companion_wgt",
"live_synchronization",
"motion_event_id",
"button_release_id",
"plot_wgt",
"toolbar",
"isolevel_wgt",
"selected_isolevel",
"smoothing_width_wgt",
"selected_smoothing_width",
)
image_record: image_record_t
isolevels: isolevels_t
lower_isovalue_margin: float
upper_isovalue_margin: float
labels: Tuple[int, ...]
colors: Sequence[array_t]
figure: pypl.Figure
axes: pypl.Axes
isolines: Sequence[Any]
isosurfaces: List[Any] # pypl.Axes.art3d.Poly3DCollection
linked_labels: List[Tuple[pypl.Axes.text, pypl.Axes.line3D]]
annotation: pypl.Axes.text
companion_wgt: three_d_widget_t
live_synchronization: bool
motion_event_id: int
button_release_id: int
plot_wgt: matplotlib_widget_t
toolbar: toolbar_widget_t
isolevel_wgt: tknt.Scale
selected_isolevel: tknt.DoubleVar
smoothing_width_wgt: tknt.Scale
selected_smoothing_width: tknt.DoubleVar
defaults = {
"isolines": [],
"isosurfaces": [],
"linked_labels": [],
"live_synchronization": False,
}
def __init__(
self,
parent: Union[tknt.Widget, tknt.Tk],
/,
*args,
**kwargs,
):
""""""
super().__init__(
parent,
*args,
borderwidth=0,
padx=0,
pady=0,
**kwargs,
class_=self.__class__.__name__,
)
for attribute in three_d_widget_t.attributes:
if attribute in three_d_widget_t.defaults:
setattr(self, attribute, three_d_widget_t.defaults[attribute])
else:
setattr(self, attribute, None)
@classmethod
def NewForImage(
cls,
image: array_t,
mode: str,
colors: Optional[Sequence[Union[float, array_t]]],
parent: Union[tknt.Widget, tknt.Tk],
/,
*args,
mip_axis: int = 0,
should_pack_slider: bool = True,
initial_relative_isovalue: float = 0.5,
lower_isovalue_margin: float = 0.1,
upper_isovalue_margin: float = 0.1,
**kwargs,
) -> three_d_widget_t:
""""""
instance = cls(parent, *args, **kwargs)
image_record = image_record_t(image, mode)
# Do not comment out; They are used in eval(attribute) below
if mode == "continuous":
colors = None
else:
isolevels, labels = _DecreasingIsolevelsAndLabels(image)
figure, axes = _FigureAndAxes(image_record.resized.shape, image.shape, mip_axis)
figure.canvas.mpl_connect("key_press_event", instance._OnKeyPress)
figure.canvas.mpl_connect("scroll_event", instance._OnWheelScroll)
plot_wgt = matplotlib_widget_t(figure, master=instance)
toolbar = toolbar_widget_t(plot_wgt, instance, pack_toolbar=False)
plot_wgt.draw()
toolbar.update()
if mode == "continuous":
if should_pack_slider:
slider_parent = instance
else:
slider_parent = parent
isolevel_wgt, selected_isolevel = _IsolevelSelectorWidget(
image,
initial_relative_isovalue,
lower_isovalue_margin,
upper_isovalue_margin,
instance.Update,
slider_parent,
)
(
smoothing_width_wgt,
selected_smoothing_width,
) = _SmoothingWidthSelectorWidget(
image_record.smoothing_width,
instance.UpdateSmoothing,
slider_parent,
)
else:
isolevel_wgt = smoothing_width_wgt = None # selected_smoothing_width
plot_wgt.get_tk_widget().pack(fill=tknt.BOTH, expand=True)
toolbar.pack(side=tknt.BOTTOM, fill=tknt.X)
if (mode == "continuous") and should_pack_slider:
isolevel_wgt.pack(side=tknt.BOTTOM, fill=tknt.X)
smoothing_width_wgt.pack(side=tknt.BOTTOM, fill=tknt.X)
# --- Saving required variables as object attributes
for attribute in cls.attributes:
try:
value = eval(attribute)
except NameError:
value = None
if value is not None:
setattr(instance, attribute, value)
instance.Update()
return instance
def UpdateImage(
self,
image: array_t,
/,
*,
colors: Sequence[Union[float, array_t]] = None,
) -> None:
""""""
self.image_record.Update(image, self.selected_smoothing_width.get())
self.colors = colors
if self.isolevel_wgt is not None:
min_bound, max_bound, level_extent = _IsolevelBounds(
image, self.lower_isovalue_margin, self.upper_isovalue_margin
)
self.isolevel_wgt.configure(
from_=min_bound,
to=max_bound,
resolution=level_extent / 100.0,
tickinterval=level_extent / 10.0,
)
isolevel = self.selected_isolevel.get()
isolevel = max(min(isolevel, max_bound), min_bound)
self.isolevel_wgt.set(isolevel)
else:
self.isolevels, self.labels = _DecreasingIsolevelsAndLabels(
self.image_record.original
)
self.Update()
def UpdateSmoothing(self, *_, **__) -> None:
""""""
self.image_record.SetSmoothed(self.selected_smoothing_width.get())
self.Update()
def Update(self, *_, **__) -> None:
""""""
if self.image_record.mode == "discrete":
self._ClearMIPProjection()
self._PlotMIPProjections()
self._ClearIsosurfaces()
self._PlotIsosurfaces()
self.figure.canvas.draw_idle()
def _ClearMIPProjection(self) -> None:
""""""
for isolines_per_pane in self.isolines:
for line in isolines_per_pane:
line.remove()
self.isolines = []
def _PlotMIPProjections(self) -> None:
""""""
isolines = []
mins, maxs, highs = _get_coord_info(self.axes)
for axis, (limit_min, limit_max, high) in enumerate(zip(mins, maxs, highs)):
isolines_per_pane = []
projection = nmpy.amax(self.image_record.smoothed, axis=axis)
if axis == 1:
first_2d_dim = 1
else:
first_2d_dim = 0
if high:
# self.image_for_isosurface.shape[axis] does not account for margin
coordinate = limit_max
else:
# 0 does not account for margin
coordinate = limit_min
for level, color in zip(self.isolevels, reversed(self.colors)):
contours = sims.find_contours(projection, level)
projection[projection >= level] = 0
for contour in contours:
polygons = (
nmpy.full(contour.shape[0], coordinate),
contour[:, first_2d_dim],
contour[:, (first_2d_dim + 1) % 2],
)
polygons = polygons[(-axis):] + polygons[:(-axis)]
lines = self.axes.plot(*polygons, linewidth=1, color=color)
isolines_per_pane.extend(lines)
isolines.append(isolines_per_pane)
self.isolines = isolines
def _AdjustMIPProjection(self) -> None:
""""""
mins, maxs, highs = _get_coord_info(self.axes)
for axis, (limit_min, limit_max, high) in enumerate(zip(mins, maxs, highs)):
if high:
# self.image_for_isosurface.shape[axis] does not account for margin
expected_coordinate = limit_max
else:
# 0 does not account for margin
expected_coordinate = limit_min
isolines_per_pane = self.isolines[axis]
points = isolines_per_pane[0].get_data_3d()
if points[axis][0] != expected_coordinate:
for line in isolines_per_pane:
points = line.get_data_3d()
n_points = points[0].__len__()
points = (
points[:axis]
+ (n_points * (expected_coordinate,),)
+ points[(axis + 1) :]
)
line.set_data_3d(points)
def _ClearIsosurfaces(self) -> None:
""""""
for isosurface in self.isosurfaces:
isosurface.remove()
self.isosurfaces = []
self._ClearLabels()
self._ClearAnnotations()
def _PlotIsosurfaces(self) -> None:
""""""
if self.image_record.mode == "continuous":
isolevels = (self.isolevel_wgt.get(),)
labels = (1,)
colors = (nmpy.array((0.0, 0.8, 0.0)),)
else:
isolevels = self.isolevels
labels = self.labels
colors = reversed(self.colors)
image = self.image_record.smoothed.copy()
n_triangles = 0
bbox_style = DEFAULT_LABEL_STYLE.copy()
for label, level, color in zip(labels, isolevels, colors):
image_min = nmpy.amin(image).item()
image_max = nmpy.amax(image).item()
if (level < image_min) or (level > image_max):
print(
f"{level}: Isolevel outside image range [{image_min},{image_max}]; Discarding"
)
continue
try:
vertices, triangles, *_ = sims.marching_cubes(
image, level=level, step_size=4
)
except RuntimeError as exception:
print(exception.args[0] + " Discarding")
continue
n_triangles += triangles.shape[0]
image[image >= level] = 0
isosurface = self.axes.plot_trisurf(
vertices[:, 0],
vertices[:, 1],
triangles,
vertices[:, 2],
color=color,
lw=1,
)
self.isosurfaces.append(isosurface)
self._PlotLabel(label, color, bbox_style, vertices)
self._PlotAnnotations(n_triangles)
def _ClearLabels(self) -> None:
""""""
for label, link in self.linked_labels:
label.remove()
link.remove()
self.linked_labels = []
def _PlotLabel(
self,
label: int,
color: array_t,
bbox_style: Dict[str, Any],
vertices: array_t,
/,
) -> None:
""""""
if self.image_record.mode == "discrete":
center = nmpy.mean(vertices, axis=0, keepdims=True)
rays = vertices - center
radii = nmpy.linalg.norm(rays, axis=1)
farthest = nmpy.argmax(radii)
source = vertices[farthest, :]
target = (
source
+ (0.035 * min(self.image_record.resized.shape) / radii[farthest])
* rays[farthest, :]
)
if nmpy.mean(color) >= 0.5:
bk_color = 0.5 * color
else:
bk_color = nmpy.fmin(3.0 * color, 3 * (1.0,))
bbox_style["fc"] = bk_color
label = self.axes.text(
*target,
label,
fontsize="small",
fontweight="bold",
color=color,
bbox=bbox_style,
)
link = self.axes.plot(*zip(source, target), color=color)[0]
self.linked_labels.append((label, link))
def _ToggleLabels(self) -> None:
""""""
for label, link in self.linked_labels:
label.set_visible(not label.get_visible())
link.set_visible(not link.get_visible())
def _ClearAnnotations(self) -> None:
""""""
if self.annotation is not None:
self.annotation.remove()
self.annotation = None
def _PlotAnnotations(self, n_triangles: int, /) -> None:
""""""
zoom_factors = (f"{_zmf:.2f}" for _zmf in self.image_record.zoom_factors)
self.annotation = self.axes.text2D(
0.0,
1.0,
f"Zoom: {' '.join(zoom_factors)}\n" f"Triangles: {n_triangles}",
fontsize="small",
transform=self.axes.transAxes,
color=3 * (0.75,),
)
def AddCompanionWidget(
self, widget: three_d_widget_t, /, *, live_synchronization: bool = False
) -> None:
""""""
self.companion_wgt = widget
self.live_synchronization = live_synchronization
_ = self.figure.canvas.mpl_connect("button_press_event", self._OnButtonPress)
def _SynchronizeCompanionView(self) -> None:
""""""
source = self.axes
companion = self.companion_wgt
target = companion.axes
UpdateCompanion = companion.figure.canvas.draw_idle
target.view_init(elev=source.elev, azim=source.azim)
target.dist = source.dist
if companion.isolines.__len__() > 0:
companion._AdjustMIPProjection()
UpdateCompanion()
def _OnKeyPress(self, event, /) -> None:
""""""
elevation = self.axes.elev
azimuth = self.axes.azim
distance = self.axes.dist
if event.key == "up":
elevation -= 5.0
elif event.key == "down":
elevation += 5.0
elif event.key == "left":
azimuth += 5.0
elif event.key == "right":
azimuth -= 5.0
elif event.key == "+":
distance -= 1.0
elif event.key == "-":
distance += 1.0
elif event.key == "l":
self._ToggleLabels()
else:
return
self.axes.view_init(elev=elevation, azim=azimuth)
self.axes.dist = distance
self._ApplyMotion(True)
def _OnButtonPress(self, _, /) -> None:
""""""
self.motion_event_id = self.figure.canvas.mpl_connect(
"motion_notify_event", self._OnMotion
)
self.button_release_id = self.figure.canvas.mpl_connect(
"button_release_event", self._OnButtonRelease
)
def _OnButtonRelease(self, _, /) -> None:
""""""
self.figure.canvas.mpl_disconnect(self.motion_event_id)
self.figure.canvas.mpl_disconnect(self.button_release_id)
if not self.live_synchronization:
self._SynchronizeCompanionView()
def _OnMotion(self, event, /) -> None:
""""""
if event.inaxes == self.axes:
self._ApplyMotion(self.live_synchronization)
def _OnWheelScroll(self, event, /) -> None:
""""""
self.axes.dist += event.step
self._ApplyMotion(True)
def _ApplyMotion(self, live_synchronization: bool) -> None:
""""""
if self.isolines.__len__() > 0:
self._AdjustMIPProjection()
self.figure.canvas.draw_idle()
if live_synchronization:
self._SynchronizeCompanionView()
def _IsolevelSelectorWidget(
image: array_t,
initial_relative_isovalue: float,
lower_isovalue_margin: float,
upper_isovalue_margin: float,
Action: Callable,
parent: Union[tknt.Widget, tknt.Tk],
) -> Tuple[tknt.Scale, tknt.DoubleVar]:
""""""
min_bound, max_bound, level_extent = _IsolevelBounds(
image, lower_isovalue_margin, upper_isovalue_margin
)
image_max = nmpy.amax(image).item()
selected_value = tknt.DoubleVar()
selected_value.set(initial_relative_isovalue * image_max)
selector = tknt.Scale(
parent,
orient="horizontal",
from_=min_bound,
to=max_bound,
resolution=level_extent / 100.0,
tickinterval=level_extent / 10.0,
variable=selected_value,
label="Isovalue",
takefocus=0,
)
# selected_value.trace_add("write", Action)
selector.bind("<ButtonRelease-1>", Action)
return selector, selected_value
def _IsolevelBounds(
image: array_t,
lower_isovalue_margin: float,
upper_isovalue_margin: float,
) -> Sequence[float]:
""""""
image_min = nmpy.amin(image).item()
image_max = nmpy.amax(image).item()
image_extent = image_max - image_min
level_extent = (1.0 - lower_isovalue_margin - upper_isovalue_margin) * image_extent
min_bound = image_min + lower_isovalue_margin * image_extent
max_bound = image_max - upper_isovalue_margin * image_extent
return min_bound, max_bound, level_extent
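# Worked example (illustrative numbers): for an image with intensities in [0, 200] and lower
# and upper margins of 0.1, image_extent = 200, level_extent = 0.8 * 200 = 160,
# min_bound = 0 + 0.1 * 200 = 20, and max_bound = 200 - 0.1 * 200 = 180.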
def _SmoothingWidthSelectorWidget(
initial_smoothing_width: float,
Action: Callable,
parent: Union[tknt.Widget, tknt.Tk],
) -> Tuple[tknt.Scale, tknt.DoubleVar]:
""""""
selected_value = tknt.DoubleVar()
selected_value.set(initial_smoothing_width)
selector = tknt.Scale(
parent,
orient="horizontal",
from_=0.0,
to=5.0,
resolution=5.0 / 100.0,
tickinterval=5.0 / 10.0,
variable=selected_value,
label="Smoothing Width",
takefocus=0,
)
# selected_value.trace_add("write", Action)
selector.bind("<ButtonRelease-1>", Action)
return selector, selected_value
def _DecreasingIsolevelsAndLabels(
image: array_t,
) -> Tuple[isolevels_t, Tuple[int, ...]]:
""""""
labels = nmpy.unique(image)
levels = labels
levels = 0.5 * (levels[:-1] + levels[1:])
levels = nmpy.flip(levels)
return levels, tuple(reversed(labels.astype(nmpy.uint64, copy=False)))
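# Worked example (illustrative label map): for labels (0, 1, 2, 3), the midpoints are
# (0.5, 1.5, 2.5), flipped to decreasing isolevels (2.5, 1.5, 0.5), i.e. one level just below
# each non-background label; the labels are returned in the matching decreasing order (3, 2, 1, 0).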
def _FigureAndAxes(
shape: Tuple[int, ...], actual_shape: Tuple[int, ...], mip_axis: int
) -> Tuple[pypl.Figure, pypl.Axes]:
""""""
figure = figure_t()
axes = figure.add_subplot(111, projection=axes_3d_t.name)
# (("left", "right"), ("bottom", "top"), ("bottom", "top"))
for axis, limit_p_1, actual_limit in zip(("x", "y", "z"), shape, actual_shape):
SetLimits = getattr(axes, f"set_{axis}lim3d")
arguments = {f"{axis}max": 0, f"{axis}min": limit_p_1}
SetLimits(**arguments)
mtpl_axis = getattr(axes, f"{axis}axis")
# Bind the loop variables as lambda defaults so that each axis keeps its own
# scaling (a plain closure would use the last iteration's values for all axes)
formatter = tckr.FuncFormatter(
lambda _vle, _pos, _act=actual_limit, _lmt=limit_p_1: str(int(round(_act * _vle / _lmt)))
)
mtpl_axis.set_major_formatter(formatter)
axes.invert_xaxis()
axes.invert_zaxis()
axes.set_box_aspect(shape)
axes.view_init(**STANDARD_VIEW_POINTS[mip_axis])
axes.set_xlabel("First")
axes.set_ylabel("Second")
axes.set_zlabel("Third")
return figure, axes
_PLANES = (
(0, 3, 7, 4),
(1, 2, 6, 5), # yz planes
(0, 1, 5, 4),
(3, 2, 6, 7), # xz planes
(0, 1, 2, 3),
(4, 5, 6, 7), # xy planes
)
def _get_coord_info(axes: pypl.Axes) -> Tuple[array_t, array_t, array_t]:
"""
https://github.com/matplotlib/matplotlib/blob/master/lib/mpl_toolkits/mplot3d/axis3d.py
"""
mins, maxs = nmpy.array(
[
axes.get_xbound(),
axes.get_ybound(),
axes.get_zbound(),
]
).T
# centers = (maxs + mins) / 2.0
deltas = (maxs - mins) / 12.0
mins = mins - deltas / 4.0
maxs = maxs + deltas / 4.0
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = axes.tunit_cube(vals, axes.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for p1, p2, p3, p4 in _PLANES]
highs = nmpy.array([avgz[2 * i] < avgz[2 * i + 1] for i in range(3)])
return mins, maxs, highs
# coords = tuple(tuple(range(_lgt)) for _lgt in shape)
# new_length = int(round(MIN_LENGTH_RATIO * max_length))
# new_coords = coords[:smallest_axis] + (tuple(range(new_length)),) + coords[(smallest_axis+1):]
#
# image = ntpl.interpn(coords, image, point)
import tkinter as tknt
from typing import Optional, Sequence, Tuple, Union
import numpy as nmpy
from matplotlib import cm as clmp
from PIL import Image as plim
from PIL import ImageTk as pltk
array_t = nmpy.ndarray
pil_image_t = plim.Image
tk_image_t = pltk.PhotoImage
class image_record_t:
""""""
__slots__ = (
"np_version",
"pil_version_original",
"pil_version",
"tk_version",
"parent",
)
attributes = __slots__
np_version: Optional[array_t]
pil_version_original: Optional[pil_image_t]
pil_version: pil_image_t
tk_version: tk_image_t
parent: Union[tknt.Widget, tknt.Tk]
def __init__(
self,
np_version: array_t,
display_version: array_t,
parent: Union[tknt.Widget, tknt.Tk],
/,
):
""""""
# Do not remove these local variables; they are looked up by name in eval(attribute) below
tk_version, pil_version = TkAndPILImagesFromNumpyArray(display_version, parent)
pil_version_original = pil_version
# --- Saving required variables as object attributes
for attribute in image_record_t.attributes:
setattr(self, attribute, eval(attribute))
def Optimize(
self,
probe_able: bool,
resizeable: bool,
/,
) -> None:
""""""
if not probe_able:
self.np_version = None
if not resizeable:
self.pil_version_original = None
def Resize(self, width: int, height: int, /) -> None:
""""""
self.pil_version = self.pil_version_original.resize((width, height))
self.tk_version = pltk.PhotoImage(master=self.parent, image=self.pil_version)
def LabelColors(
image: array_t, mode: str, /, *, gray_offset: int = 0, colormap: str = "viridis"
) -> Sequence[Union[float, array_t]]:
"""
mode: "gray", "color"
gray_offset: Value of darkest non-background intensity
"""
# Use the max label instead of the number of unique labels so that colors stay stable if labels are deleted
n_labels = int(nmpy.amax(image))
if mode == "gray":
output = nmpy.linspace(max(gray_offset, 1) / 255.0, 1.0, num=n_labels)
elif mode == "color":
LinearValueToRGB = clmp.get_cmap(colormap)
if n_labels > 1:
output = tuple(
nmpy.array(LinearValueToRGB((_lbl - 1.0) / (n_labels - 1.0))[:3])
for _lbl in range(1, n_labels + 1)
)
else:
output = (nmpy.array(LinearValueToRGB(1.0)[:3]),)
else:
raise ValueError(f'{mode}: Invalid mode; Expected="gray" or "color"')
return output
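# Usage sketch (hypothetical image): for a label image with labels 1..3,
# LabelColors(image, "gray") returns 3 gray levels in (0, 1], and
# LabelColors(image, "color") returns 3 RGB triplets (arrays of 3 floats in
# [0, 1]) sampled from the "viridis" colormap.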
def ScaledVersion(image: array_t, gray_levels: Sequence[float], /) -> array_t:
""""""
output = nmpy.zeros(image.shape, dtype=nmpy.uint8)
for label, gray_level in enumerate(gray_levels, start=1):
output[image == label] = round(255.0 * gray_level)
return output
def ColoredVersion(image: array_t, colors: Sequence[array_t], /) -> array_t:
""""""
output = nmpy.zeros(image.shape + (3,), dtype=nmpy.uint8)
for label, color in enumerate(colors, start=1):
output[image == label] = nmpy.around(255.0 * color)
return output
def TkAndPILImagesFromNumpyArray(
array: array_t, parent: Union[tknt.Widget, tknt.Tk], /
) -> Tuple[tk_image_t, pil_image_t]:
""""""
pil_image = plim.fromarray(array)
tk_image = pltk.PhotoImage(master=parent, image=pil_image)
return tk_image, pil_image
......@@ -29,95 +29,106 @@
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
import ast as asgt
import configparser
# --Parameters from .INI file
parameters = configparser.ConfigParser(allow_no_value=True)
parameters.read('parameters.ini')
def IfNotFloat(section: str, key: str) -> float:
# Return the value as a float or, if it cannot be parsed as such, evaluate it
# as a Python expression (e.g. "0.24050024 * 2")
try:
return parameters.getfloat(section, key)
except ValueError:
return eval(parameters.get(section, key))
# [Input]
data_path = parameters['Input']['data_path']
channel = parameters['Input']['channel']
try:
size_voxel_in_micron = parameters['Input']['size_voxel_in_micron']
if size_voxel_in_micron is not None:
size_voxel_in_micron = [float(_sze) for _sze in size_voxel_in_micron.split(',')]
except (KeyError, ValueError) as exc:
raise ValueError('Invalid voxel size parameter. Write it as "size_voxel_in_micron: X,Y,Z" '
'without parentheses, brackets, or spaces.') from exc
# [Somas]
soma_low_c = IfNotFloat('Somas', 'soma_low_c')
soma_high_c = IfNotFloat('Somas', 'soma_high_c')
soma_selem_micron_c = IfNotFloat('Somas', 'soma_selem_micron_c')
soma_min_area_c = IfNotFloat('Somas', 'soma_min_area_c')
# [Extensions]
ext_low_c = IfNotFloat('Extensions', 'ext_low_c')
ext_high_c = IfNotFloat('Extensions', 'ext_high_c')
ext_selem_micron_c = IfNotFloat('Extensions', 'ext_selem_micron_c')
ext_min_area_c = IfNotFloat('Extensions', 'ext_min_area_c')
# [Connexions]
max_straight_sq_dist_c = IfNotFloat('Connexions', 'max_straight_sq_dist_c')
max_weighted_length_c = IfNotFloat('Connexions', 'max_weighted_length_c')
# [Frangi]
try:
scale_range = tuple(float(_scl) for _scl in parameters['Frangi']['scale_range'].split(','))
except (KeyError, ValueError) as exc:
raise ValueError('Invalid scale range parameter. Write it as "scale_range: min,max" '
'without parentheses, brackets, or spaces.') from exc
scale_step = IfNotFloat('Frangi', 'scale_step')
alpha = IfNotFloat('Frangi', 'alpha')
beta = IfNotFloat('Frangi', 'beta')
frangi_c = IfNotFloat('Frangi', 'frangi_c')
bright_on_dark = parameters.getboolean('Frangi', 'bright_on_dark')
method = parameters['Frangi']['method']
# [Program running]
with_plot = parameters.getboolean('Program Running', 'with_plot')
in_parallel = parameters.getboolean('Program Running', 'in_parallel')
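# Illustrative parameters.ini fragment matching the parsing above (values are
# examples only):
#
#   [Input]
#   data_path: ./data/image.tif
#   channel: G
#   size_voxel_in_micron: 0.24,0.24,0.49
#
#   [Frangi]
#   scale_range: 0.5,3.0
#   scale_step: 0.5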
# data_path = "./data/DIO_6H_6_1.70bis_2.2_3.tif"
# channel = 'G'
# size_voxel_in_micron = None # -> list [X,Y,Z]
# with_plot = False
#
# soma_low_c = 0.15
# soma_high_c = 0.7126
# ext_low_c = 0.2 #3 #1e-7 ##0.2 # 0.02 # 0.2 # ext_low_c = 9.0e-4
# ext_high_c = 0.6 #1e-10 #1.2e-7 ##0.6 # 0.04 # 0.6 # high_ext = 8.0e-3
#
# # soma_selem_c = mp_.disk(2)
# # ext_selem_c = mp_.disk(1)
# soma_selem_micron_c = 0.24050024 * 2
# ext_selem_micron_c = 0.24050024 * 1
#
# max_straight_sq_dist_c = (30 ** 2) * 0.24050024
# max_weighted_length_c = 20.0 * 0.24050024
#
# soma_min_area_c = 1000 * (0.24050024 ** 2)
# ext_min_area_c = 100 * (0.24050024 ** 2)
# # soma_min_area_c = 1000
# # ext_min_area_c = 100
#
# in_parallel = False
import pprint as pprt
from configparser import DEFAULTSECT as DEFAULT_SECTION
from pathlib import Path as path_t
from typing import Any
from logger_36 import LOGGER
from logger_36.handler import AddFileHandler
from logger_36.system import LogSystemDetails
from logger_36.time import TimeStamp
_BOOLEANS = (
("Connection", "dilatation_erosion"),
("Extension", "adapt_hist_equalization"),
("Frangi", "bright_on_dark"),
("Output", "statistical_analysis"),
("Workflow", "in_parallel"),
("Workflow", "interactive"),
("Workflow", "with_plot"),
)
_INTEGERS = (
("Feature", "number_of_bins_curvature"),
("Feature", "number_of_bins_length"),
)
_FLOATS = (
("Connection", "max_straight_sq_dist"),
("Connection", "max_weighted_length"),
("Extension", "ext_high"),
("Extension", "ext_low"),
("Extension", "ext_min_area"),
("Extension", "ext_selem_micron"),
("Feature", "hist_min_curvature"),
("Feature", "hist_min_length"),
("Feature", "hist_step_curvature"),
("Feature", "hist_step_length"),
("Frangi", "alpha"),
("Frangi", "beta"),
("Frangi", "frangi_c"),
("Frangi", "scale_step"),
("Soma", "soma_high"),
("Soma", "soma_low"),
("Soma", "soma_max_area"),
("Soma", "soma_min_area"),
("Soma", "soma_selem_micron"),
)
_PATHS = (
("Input", "data_path"),
("Output", "save_csv"),
("Output", "save_features_analysis"),
("Output", "save_images"),
)
_TO_BE_EVALUATED = (
("Feature", "hist_bins_borders_curvature"),
("Feature", "hist_bins_borders_length"),
("Frangi", "scale_range"),
("Input", "voxel_size_in_micron"),
)
def NutrimorphArguments(path: path_t, /) -> tuple[dict[str, Any], ...]:
""""""
config = configparser.ConfigParser(allow_no_value=True, inline_comment_prefixes="#")
config.read(path)
output: dict[str, dict[str, Any]] = dict(
(_sct, dict(_prm)) for _sct, _prm in config.items() if _sct != DEFAULT_SECTION
)
for parameters, getter in (
(_BOOLEANS, "getboolean"),
(_INTEGERS, "getint"),
(_FLOATS, "getfloat"),
(_PATHS, path_t),
(_TO_BE_EVALUATED, asgt.literal_eval),
):
if isinstance(getter, str):
Converted = getattr(config, getter)
else:
Converted = lambda _sct, _prm: getter(config.get(_sct, _prm))
for section, parameter in parameters:
if output[section][parameter] is not None:
output[section][parameter] = Converted(section, parameter)
base_folder = output["Output"]["save_images"] / output["Input"]["data_path"].stem
if base_folder.exists() and not base_folder.is_dir():
raise RuntimeError(f"{base_folder}: Exists and is not a folder.")
if not base_folder.exists():
base_folder.mkdir(parents=True)
AddFileHandler(
base_folder / f"info-warning-error-{TimeStamp()}.txt", show_memory_usage=True
)
LogSystemDetails()
LOGGER.info(pprt.pformat(output))
# Section order: 'Connection', 'Extension', 'Feature', 'Frangi', 'Input', 'Output', 'Soma', 'Workflow'
return tuple(output[_sct] for _sct in sorted(output.keys()))
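# A minimal, hypothetical INI fragment accepted by NutrimorphArguments (values
# are for illustration only; parameter names come from the tables above):
#
#   [Input]
#   data_path = ./data/volume.tif
#   voxel_size_in_micron = [0.24, 0.24, 0.49]
#
#   [Workflow]
#   in_parallel = False
#   interactive = True
#   with_plot = False
#
# Booleans, integers and floats are converted with the corresponding
# configparser getters, paths become pathlib.Path objects, and the entries
# listed in _TO_BE_EVALUATED are parsed with ast.literal_eval.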
# Copyright CNRS/Inria/UNS
# Contributor(s): Eric Debreuve (since 2019)
#
# eric.debreuve@cnrs.fr
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
"""
Dijkstra Shortest Weighted Path from one image/volume pixel/voxel to one or more image/volume pixel(s)/voxel(s)
Graph nodes = pixels/voxels = sites
Graph edges = pixel/voxel neighborhood relationships
Edge weights = typically computed from pixel/voxel values
Adapted from material of a course by Marc Pegon:
https://www.ljll.math.upmc.fr/pegon/teaching.html
https://www.ljll.math.upmc.fr/pegon/documents/BCPST/TP06_src.tar.gz
https://www.researchgate.net/profile/Marc_Pegon
"""
import heapq as hp_
from typing import Final, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np_
import scipy.ndimage as mp_  # scipy.ndimage.morphology is deprecated; distance_transform_edt is available here
# Slightly slower alternative (look for SSA below)
# import scipy.ndimage as im_
# import skimage.draw as dw_
number_h = Union[int, float]
tintegers_h = Tuple[int, ...] # tintegers=tuple of integers
site_h = tintegers_h
path_h = Tuple[site_h, ...]
path_nfo_h = Tuple[path_h, float]
array_t = np_.ndarray
def DijkstraShortestPath(
costs: array_t,
origin: site_h,
target: Union[site_h, Sequence[site_h]],
limit_to_sphere: bool = True,
constrain_direction: bool = True,
return_all: bool = False,
) -> Union[path_nfo_h, Tuple[path_nfo_h, ...]]:
#
costs, targets, valid_directions, dir_lengths = _DijkstraShortestPathPrologue(
costs, origin, target, limit_to_sphere, constrain_direction,
)
nearest_sites = nearest_site_queue_t()
nearest_sites.Insert(0, origin)
min_distance_to = {origin: 0.0}
predecessor_of = {}
visited_sites = set()
while True:
site_nfo = nearest_sites.Pop()
if site_nfo is None:
# Empty queue: the full graph traversal did not reach all targets
# This case is handled below (unreached targets get a None distance)
break
distance, site = site_nfo
if site in targets:
targets.remove(site)
if targets.__len__() > 0:
continue
else:
break
if site not in visited_sites:
visited_sites.add(site)
for successor, edge_length in _OutgoingEdges(
site, valid_directions, dir_lengths, costs
):
successor = tuple(successor)
next_distance = distance + edge_length
min_distance = min_distance_to.get(successor)
if (min_distance is None) or (next_distance < min_distance):
min_distance_to[successor] = next_distance
predecessor_of[successor] = site
nearest_sites.Insert(next_distance, successor)
if isinstance(target[0], Sequence):
targets = tuple(target)
else:
targets = (target,)
if return_all:
all_paths = []
for one_target in targets:
distance = min_distance_to.get(one_target)
if distance is None:
all_paths.append(((), None))
else:
path = []
site = one_target
while site is not None:
path.append(site)
site = predecessor_of.get(site)
all_paths.append((tuple(reversed(path)), distance))
return tuple(all_paths)
else:
min_distance = np_.inf
closest_target = None
for one_target in targets:
distance = min_distance_to.get(one_target)
if (distance is not None) and (distance < min_distance):
min_distance = distance
closest_target = one_target
path = []
if closest_target is None:
distance = None
else:
distance = min_distance_to.get(closest_target)
site = closest_target
while site is not None:
path.append(site)
site = predecessor_of.get(site)
return tuple(reversed(path)), distance
def _DijkstraShortestPathPrologue(
costs: array_t,
origin: site_h,
target: Union[site_h, Sequence[site_h]],
limit_to_sphere: bool,
constrain_direction: bool,
) -> Tuple[array_t, List[site_h], array_t, array_t]:
#
if isinstance(target[0], Sequence):
targets = list(target)
else:
targets = [target]
if limit_to_sphere:
costs = _SphereLimitedCosts(costs, origin, targets)
if costs.ndim == 2:
if constrain_direction:
valid_directions, dir_lengths = _FilteredDirections(
DIRECTIONS_2D, LENGTHS_2D, origin, targets
)
else:
valid_directions, dir_lengths = DIRECTIONS_2D, LENGTHS_2D
elif costs.ndim == 3:
if constrain_direction:
valid_directions, dir_lengths = _FilteredDirections(
DIRECTIONS_3D, LENGTHS_3D, origin, targets
)
else:
valid_directions, dir_lengths = DIRECTIONS_3D, LENGTHS_3D
else:
raise ValueError(f"Cost matrix has {costs.ndim} dimension(s); Expecting 2 or 3")
return costs, targets, valid_directions, dir_lengths
def _OutgoingEdges(
site: site_h, valid_directions: array_t, dir_lengths: array_t, costs: array_t
) -> Iterator:
#
neighbors = valid_directions + np_.array(site, dtype=valid_directions.dtype)
n_dims = site.__len__()
inside = np_.all(neighbors >= 0, axis=1)
for c_idx in range(n_dims):
np_.logical_and(
inside, neighbors[:, c_idx] <= costs.shape[c_idx] - 1, out=inside
)
neighbors = neighbors[inside, :]
dir_lengths = dir_lengths[inside]
# For any n_dims: neighbors_costs = costs[tuple(zip(*neighbors.tolist()))]
if n_dims == 2:
neighbors_costs = costs[(neighbors[:, 0], neighbors[:, 1])]
else:
neighbors_costs = costs[(neighbors[:, 0], neighbors[:, 1], neighbors[:, 2])]
valid_sites = np_.isfinite(neighbors_costs) # Excludes inf and nan
neighbors = neighbors[valid_sites, :]
neighbors_costs = neighbors_costs[valid_sites] * dir_lengths[valid_sites]
return zip(neighbors.tolist(), neighbors_costs)
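# Example (2D, with the full set of 8 directions): from site (0, 0) in a 3x3
# cost map, only the in-bounds neighbors (0, 1), (1, 0) and (1, 1) are yielded,
# with edge weights equal to the neighbor cost times 1, 1 and sqrt(2)
# respectively, provided the neighbor costs are finite.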
DIRECTIONS_2D: Final = np_.array(
tuple((i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if i != 0 or j != 0),
dtype=np_.int16,
)
DIRECTIONS_3D: Final = np_.array(
tuple(
(i, j, k)
for i in (-1, 0, 1)
for j in (-1, 0, 1)
for k in (-1, 0, 1)
if i != 0 or j != 0 or k != 0
),
dtype=np_.int16,
)
LENGTHS_2D: Final = np_.linalg.norm(DIRECTIONS_2D, axis=1)
LENGTHS_3D: Final = np_.linalg.norm(DIRECTIONS_3D, axis=1)
def _FilteredDirections(
all_directions: array_t,
all_lengths: array_t,
origin: site_h,
targets: Sequence[site_h],
) -> Tuple[array_t, array_t]:
#
n_dims = origin.__len__()
n_targets = targets.__len__()
straight_lines = np_.empty((n_dims, n_targets), dtype=np_.float64)
for p_idx, target in enumerate(targets):
for c_idx in range(n_dims):
straight_lines[c_idx, p_idx] = target[c_idx] - origin[c_idx]
inner_prods = all_directions @ straight_lines
valid_directions = np_.any(inner_prods >= 0, axis=1)
return all_directions[valid_directions, :], all_lengths[valid_directions]
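# Example (2D): with origin (0, 0) and a single target (5, 0), the straight
# line is (5, 0) and only the 5 directions with a non-negative first component,
# (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), are kept out of the 8 possible ones.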
def _SphereLimitedCosts(
costs: array_t, origin: site_h, targets: Sequence[site_h]
) -> array_t:
"""
Note: Cannot be compiled with Numba because of the use of slices
"""
valid_sites = np_.zeros_like(costs, dtype=bool)  # Plain bool: np_.bool was removed from recent NumPy versions
center_map = np_.ones_like(costs, dtype=np_.uint8)
distances = np_.empty_like(costs, dtype=np_.float64)
targets_as_array = np_.array(targets)
centers = np_.around(0.5 * (targets_as_array + np_.array(origin))).astype(
np_.int64, copy=False
)
centers = tuple(tuple(center) for center in centers)
n_dims = origin.__len__()
for t_idx, target in enumerate(targets):
sq_radius = max(
(np_.subtract(centers[t_idx], origin) ** 2).sum(),
(np_.subtract(centers[t_idx], target) ** 2).sum(),
)
radius = np_.sqrt(sq_radius).astype(np_.int64, copy=False) + 1
# Note the +1 in the slice ends to account for Python's half-open (right-open) ranges
bbox = tuple(
slice(
max(centers[t_idx][c_idx] - radius, 0),
min(centers[t_idx][c_idx] + radius + 1, distances.shape[c_idx]),
)
for c_idx in range(n_dims)
)
if t_idx > 0:
center_map[centers[t_idx - 1]] = 1
center_map[centers[t_idx]] = 0
mp_.distance_transform_edt(center_map[bbox], distances=distances[bbox])
distance_thr = max(distances[origin], distances[target])
valid_sites[bbox][distances[bbox] <= distance_thr] = True
# # SSA
# if n_dims == 2:
# valid_coords = dw_.circle(
# *centers[t_idx], radius, shape=valid_sites.shape
# )
# else:
# local_shape = tuple(slc.stop - slc.start for slc in bbox)
# local_center = tuple(centers[t_idx][c_idx] - slc.start for c_idx, slc in enumerate(bbox))
# valid_coords = __SphereCoords__(*local_center, radius, shape=local_shape)
# valid_coords = tuple(valid_coords[c_idx] + slc.start for c_idx, slc in enumerate(bbox))
# valid_sites[valid_coords] = True
local_cost = costs.copy()
local_cost[np_.logical_not(valid_sites)] = np_.inf
return local_cost
class nearest_site_queue_t:
#
__slots__ = ("heap", "insertion_idx", "visited_sites")
def __init__(self):
#
self.heap = []
self.insertion_idx = 0
self.visited_sites = {}
def Insert(self, distance: number_h, site: site_h) -> None:
"""
Insert a new site with its distance, or update the distance of an existing site
"""
if site in self.visited_sites:
self._Delete(site)
self.insertion_idx += 1
site_nfo = [distance, self.insertion_idx, site]
self.visited_sites[site] = site_nfo
hp_.heappush(self.heap, site_nfo)
def Pop(self) -> Optional[Tuple[number_h, site_h]]:
"""
Return (distance, site) for the site of minimum distance, or None if queue is empty
"""
while self.heap:
distance, _, site = hp_.heappop(self.heap)
if site is not None:
del self.visited_sites[site]
return distance, site
return None
def _Delete(self, site: site_h) -> None:
#
site_nfo = self.visited_sites.pop(site)
site_nfo[-1] = None
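# Usage sketch (hypothetical sites): inserting an already-present site updates
# its distance through lazy deletion (the stale heap entry is invalidated and
# skipped by Pop):
#
#   queue = nearest_site_queue_t()
#   queue.Insert(3.0, (0, 1))
#   queue.Insert(1.0, (2, 2))
#   queue.Insert(0.5, (0, 1))   # updates the distance of (0, 1)
#   queue.Pop()                 # -> (0.5, (0, 1))
#   queue.Pop()                 # -> (1.0, (2, 2))
#   queue.Pop()                 # -> None (empty queue)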
# # SSA
# def __SphereCoords__(
# row: int, col: int, dep: int, radius: int, shape: Tuple[int, int, int]
# ) -> np_array_picker_h:
# #
# sphere = np_.zeros(shape, dtype=np_.bool)
# # dw_.ellipsoid leaves a one pixel margin around the ellipse, hence [1:-1, 1:-1, 1:-1]
# ellipse = dw_.ellipsoid(radius, radius, radius)[1:-1, 1:-1, 1:-1]
# sp_slices = tuple(
# slice(0, min(sphere.shape[idx_], ellipse.shape[idx_])) for idx_ in (0, 1, 2)
# )
# sphere[sp_slices] = ellipse[sp_slices]
#
# sphere = im_.shift(
# sphere, (row - radius, col - radius, dep - radius), order=0, prefilter=False
# )
#
# return sphere.nonzero()
File deleted