Commit 1df77daf authored by Sylvain Soliman's avatar Sylvain Soliman

Merge branch 'release/4.4.12'

parents 57171437 ac609f56
......@@ -53,8 +53,7 @@ KERNEL_DIR=$(JUPYTER_DIR)/kernel/biocham_kernel
WORKFLOWS_DIR=$(JUPYTER_DIR)/guinbextension/src/config/workflows
# NOTEBOOKS=$(shell find . -type f -name '*.ipynb' -print)
# NOTEBOOKS=library/examples/C2-19-Biochemical-Programming/TD1_lotka_volterra.ipynb library/examples/doctor_in_the_cell/diagnosis.ipynb
NOTEBOOKS=library/examples/C2-19-Biochemical-Programming/TD1_lotka_volterra.ipynb
NOTEBOOKS=library/examples/C2-19-Biochemical-Programming/TD1_lotka_volterra.ipynb library/examples/doctor_in_the_cell/diagnosis.ipynb
REFDIR=nbrefs
all: biocham biocham_debug quick doc/index.html pldoc install_kernel install_gui
......
......@@ -8,7 +8,7 @@
about/0
]).
version('4.4.11').
version('4.4.12').
copyright(
'Copyright (C) 2003-2020 Inria, EPI Lifeware, Saclay-Île de France, France'
......
FROM registry.gitlab.inria.fr/lifeware/biocham:v4.4.11
FROM registry.gitlab.inria.fr/lifeware/biocham:v4.4.12
{
"name": "gui",
"version": "4.4.11",
"version": "4.4.12",
"description": "biocham gui in jupyter notebook",
"main": "src/index.js",
"scripts": {
......
......@@ -132,11 +132,13 @@ commands = [
"list_options",
"list_parameters",
"list_reactions",
"list_reactions_with_autocatalyst",
"list_reactions_with_catalyst",
"list_reactions_with_inhibitor",
"list_reactions_with_product",
"list_reactions_with_reactant",
"list_reactions_with_species",
"list_reactions_with_strict_catalyst",
"list_rows",
"list_rules",
"list_sink_species",
......@@ -211,6 +213,7 @@ commands = [
"set_p_m_rate",
"test_rate_independence",
"test_rate_independence_inputs_sinks",
"test_rate_independence_invariants",
"transition",
"tropicalize",
"undefined",
......
"""Example magic"""
__version__ = '4.4.11'
__version__ = '4.4.12'
......@@ -1639,19 +1639,19 @@ monomial_to_reaction([R,Exp], Product, Name_list) :-
(
R > 0
->
exponant_to_solution(Exp, Name_list, Sol, Kin),
exponant_to_solution(Exp, Name_list, Sol, _Kin),
Reac = '_',
Rate = R,
Prod = Product
;
exponant_to_solution(Exp, Name_list, _Sol, Kin),
exponant_to_solution(Exp, Name_list, _Sol, _Kin),
nth1(N, Name_list, Product),
length(Exp, Len),
Left is N-1,
Right is Len-N,
displace_exponent([-1],Left,Right,EmY),
add_list(Exp, EmY, Exp_cor),
exponant_to_solution(Exp_cor, Name_list, Sol, _Kin),
exponant_to_solution(Exp_cor, Name_list, Sol, _Kinetics),
Reac = Product,
Rate is -R,
Prod = '_'
......@@ -1659,9 +1659,9 @@ monomial_to_reaction([R,Exp], Product, Name_list) :-
(
Sol = '_'
->
add_reaction(Rate*Kin for Reac=>Prod)
add_reaction('MA'(Rate) for Reac=>Prod)
;
add_reaction(Rate*Kin for Reac=[Sol]=>Prod)
add_reaction('MA'(Rate) for Reac=[Sol]=>Prod)
).
......@@ -1701,7 +1701,7 @@ negate_name([], _N, [], []) :- !.
negate_name([Name|TailName], N, [N|TailNeg], [Name_p,Name_m|TailNewName]) :-
!,
name_p_m(Name,Name_p,Name_m),
add_reaction(fast*Name_p*Name_m for Name_p+Name_m=>_),
add_reaction('MA'(fast) for Name_p+Name_m=>_),
NN is N+1,
negate_name(TailName, NN, TailNeg, TailNewName).
......@@ -2007,9 +2007,7 @@ monomial_remove_coeff([_, M], M).
derivatives_to_allmonomials(AllDerivatives, AllMonomials) :-
maplist(derivative_to_allmonomials, AllDerivatives, Monomials),
append(Monomials, SomeMonomials),
sort(SomeMonomials, AllMonomials).
maplist(derivative_to_allmonomials, AllDerivatives, AllMonomials).
derivative_to_allmonomials(Derivative, Monomials) :-
......
This diff is collapsed.
This diff is collapsed.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Prey-Predator\n",
"\n",
"Let us start by looking again at the Prey-Predator model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"load(library:examples/lotka_volterra/LVi.bc).\n",
"list_model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SSA means Stochastic Simulation Algorithm (from Gillespie)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"numerical_simulation(method: ssa).\n",
"plot."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SPN is a Stochastic Petri Net, i.e., SSA without time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"numerical_simulation(method: spn).\n",
"plot."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SBN is a Stochastic Boolean Net, i.e., a stochastic boolean simulation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"numerical_simulation(method: sbn).\n",
"plot."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"Now let us look at different ways to approach PAC learning for this model.\n",
"\n",
"First, the biocham command: `pac_learning(Model, #Initial_states, Time_horizon)`\n",
"it will read the file `Model` and generate `#Initial_states` random initial states from which it will run simulations for `Time_horizon`.\n",
"\n",
"You can add options for the simulation, notably: `boolean_simulation: yes` to go from default `ssa` to `sbn` method,\n",
"and `cnf_clause_size: 2` to change the size of the clauses considered from the default `3`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 1\n",
"\n",
"Compare the results of trying to learn a model from traces of the above `library:examples/lotka_volterra/LVi.bc` model in the 3 following conditions:\n",
"\n",
"1. A single boolean simulation of length 50\n",
"2. 25 boolean simulations of length 2\n",
"3. 50 stochastic simulations of length 1\n",
"\n",
"Explain what you observe"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 2\n",
"\n",
"In the output, the `h` corresponds to Valiant's precision parameter. What we know (see François' slides) is that with $L(h, s)$ samples we have probability higher than $1 - h^{-1}$ to find our approximation, and its total amount of false negatives has measure $< h^{-1}$\n",
"\n",
"How did we turn this into an estimate of the number of samples needed for a given $h$?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 3\n",
"\n",
"Why do we have to provide a `cnf_clause_size` to learn CNF formulae of size less than `K`?\n",
"\n",
"What does it represent \"biologically\"? Where can you see that in the model?\n",
"\n",
"Could we have used the DNF learning algorithm here? why?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"# Circadian Clock\n",
"\n",
"Let us now look at a slightly bigger model of the Circadian Clock by J.-P. Comet and G. Bernot"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"load(library:examples/circadian_cycle/bernot_comet.bc).\n",
"list_model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 4\n",
"\n",
"Using Biocham commands of your choice, what do you observe as the behavior of this model?\n",
"\n",
"You might want to represent graphically the 8-state 3-dimensional state-space and the possible transitions between each state…"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 5\n",
"\n",
"Now try PAC learning on that model, choosing yourself the number of initial states and simulation length, so that you are satisfied with the result.\n",
"\n",
"What happens if you impose `cnf_clause_size: 1`? How would you expect that to be reflected in the behavior of the model?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"# Th cells\n",
"\n",
"Let us now consider an even bigger model coming from L. Mendoza (Biosystems 2006), and made Boolean by the same author with Remy et al. (Dynamical Roles and Functionality of Feedback Circuits, Springer 2006).\n",
"\n",
"![Th Lymphocyte differentiation](RemyEtAl06.png)\n",
"\n",
"The model is about the control and differentiation of Th (lymphocyte) cells.\n",
"\n",
"Before \"learning\" it, we will try to understand it a bit…"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"load(library:examples/Th_lymphocytes/lympho.bc).\n",
"list_model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"draw_influences."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Basically Th0 cells differentiate either into\n",
"\n",
"Th1 cells (marked by the activity of the TBet transcription factor) under the effect of IFNγ\n",
"\n",
"or\n",
"\n",
"Th2 cells under the effect of IL4 that binds to its receptor to activate STAT6 and GATA3…"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"list_stable_states."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 6\n",
"\n",
"Why do we have 6 stable states instead of 3?\n",
"\n",
"Hint: the picture of the graph might help…"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 7\n",
"\n",
"If one hopes for traces that would present all events with equal probability, what would be the approximate total number of samples needed to learn our 12-species model for $h = 0.1$?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For time reasons, we will only use 10000 samples total.\n",
"\n",
"## Question 8\n",
"\n",
"Compare the three following models, and especially the last two ones:\n",
"\n",
"- the model learnt with a single (stochastic) simulation of length 10000\n",
"- the model learnt with 10000 simulations of length 1 (with random initial states)\n",
"- the original model\n",
"\n",
"What do you observe? Can you explain why?\n",
"\n",
"If there are inconsistencies, can you propose a possible solution?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 9\n",
"\n",
"Keeping the total number of samples at 10000, can you find a threshold after which models learnt are of better quality?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 10\n",
"\n",
"Could we have used the DNF learning algorithm? Why?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Question 11\n",
"\n",
"In order to reduce the number of samples needed for a given $h$, one solution is to use some prior knowledge.\n",
"\n",
"Say we provide to the PAC learning algorithm the influence graph obtained by `draw_influences`.\n",
"\n",
"How and why would that reduce the number of samples?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,