Commit 66613240 authored by GUILLOTEAU Quentin

update pages

parent 85fcfd37
Pipeline #765077 passed
Showing with 47 additions and 2279 deletions
Controller = {}
Controller.__index = Controller

function Controller:new()
    local ret = {kp = 0.5500, ki = 0.5300, ref = 1.75, sum = 0}
    setmetatable(ret, Controller)
    return ret
end

function Controller:ctrl(sensor)
    local err = self.ref - sensor
    self.sum = self.sum + err
    local u = self.kp * err + self.ki * self.sum
    if u < 0 then
        u = 0
    end
    print("sensor: " .. sensor .. ", ref: " .. self.ref .. ", actuator: " .. u .. ", nb threads: " .. math.floor(u))
    return math.floor(u)
end

return Controller
Controller = {}
Controller.__index = Controller

function Controller:new()
    local ret = {kp = 5.500, ki = 5.300, ref = 2.0, sum = 0}
    setmetatable(ret, Controller)
    return ret
end

function Controller:ctrl(sensor)
    local err = self.ref - sensor
    self.sum = self.sum + err
    local u = self.kp * err + self.ki * self.sum
    if u < 0 then
        u = 0
    end
    print("sensor: " .. sensor .. ", ref: " .. self.ref .. ", actuator: " .. u .. ", nb iters: " .. math.floor(u))
    return math.floor(u)
end

return Controller
@@ -19,94 +19,7 @@
};
in
rec {
apps.${system} = {
default = {
type = "app";
program = "${self.packages.x86_64-linux.app}/bin/tuto";
};
};
packages.${system} = rec {
app =
let
myPython = (pkgs.python3.withPackages (ps: with ps; with pkgs.python3Packages; [
jupyterlab
matplotlib
tuto_control_lib
]));
in
pkgs.writeScriptBin "tuto" ''
#!${pkgs.stdenv.shell}
${myPython}/bin/jupyter-lab --port=8888 --ip=0.0.0.0 --allow-root ${./jupyter_notebooks}/
'';
tuto = pkgs.stdenv.mkDerivation {
name = "tuto-ctrl";
src = ./src_real_system;
buildInputs = [
pkgs.gcc
pkgs.lua5_4
pkgs.gnumake
];
buildPhase = ''
mkdir -p $out/bin
make
'';
installPhase = ''
cp tuto-ctrl $out/bin
cp tuto-ctrl-iter $out/bin
'';
};
tuto_control_lib = pkgs.python3Packages.buildPythonPackage {
name = "tuto_control_lib";
inherit version;
src = ./tuto_control_lib;
propagatedBuildInputs = with pkgs.python3Packages; [
# ...
matplotlib
numpy
];
};
tuto-py-docker = pkgs.dockerTools.buildImage {
name = "registry.gitlab.inria.fr/control-for-computing/tutorial/tuto";
tag = version;
contents = [
(pkgs.python3.withPackages (ps: with ps; with pkgs.python3Packages; [
jupyterlab
matplotlib
tuto_control_lib
]))
];
runAsRoot = ''
mkdir -p /tuto
mkdir -p /tuto/data
ln -s ${./jupyter_notebooks}/*.ipynb /tuto
'';
config = {
Cmd = [ "jupyter-lab" "--port=8888" "--ip=0.0.0.0" "--allow-root" "--no-browser" "/tuto" ];
ExposedPorts = {
"8888/tcp" = { };
};
WorkingDir = "/tuto";
Volumes = { "/tuto/data" = { }; };
};
};
tuto-system-docker = pkgs.dockerTools.buildImage {
name = "registry.gitlab.inria.fr/control-for-computing/tutorial/system";
tag = version;
contents = [
tuto
];
config = {
Cmd = [ "${tuto}/bin/tuto-ctrl" ];
# Entrypoint = [ "${tuto}/bin/tuto-ctrl" ];
WorkingDir = "/data";
Volumes = { "/data" = { }; };
};
};
mdbook-admonish = pkgs.callPackage ./pages/mdbook-admonish.nix { };
doc = pkgs.stdenv.mkDerivation {
@@ -130,22 +43,6 @@
};
devShells.${system} = {
default = pkgs.mkShell {
buildInputs = [
pkgs.gcc
pkgs.lua5_4
pkgs.gnumake
];
};
jpy = pkgs.mkShell {
buildInputs = [
pkgs.python3Packages.jupyterlab
pkgs.python3Packages.matplotlib
packages.${system}.tuto_control_lib
];
};
pages = pkgs.mkShell {
buildInputs = [
pkgs.mdbook
...
File deleted
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_ITER 100
#define R 2
struct Complex {
double x;
double y;
};
struct Complex *new_complex(double x, double y) {
struct Complex *c = malloc(sizeof(struct Complex));
c->x = x;
c->y = y;
return c;
}
struct Complex add_complex(struct Complex c1, struct Complex c2) {
struct Complex z;
z.x = c1.x + c2.x;
z.y = c1.y + c2.y;
return z;
}
struct Complex mult_complex(struct Complex c1, struct Complex c2) {
struct Complex z;
z.x = c1.x * c2.x - c1.y * c2.y;
z.y = c1.x * c2.y + c2.x * c1.y;
return z;
}
double norm(struct Complex c) { return c.x * c.x + c.y * c.y; }
bool is_in_julia_set(struct Complex z, struct Complex c,
uint64_t max_iterations) {
struct Complex local_z;
local_z.x = z.x;
local_z.y = z.y;
uint64_t iterations = 0;
double max_norm = R * R;
while (norm(local_z) < max_norm && iterations < max_iterations) {
local_z = add_complex(mult_complex(local_z, local_z), c);
iterations++;
}
return (iterations != max_iterations);
}
void draw_julia_set() {
double y_min = -1.0;
double y_max = 1.0;
double x_min = -1.5;
double x_max = 1.5;
double x_inc = 0.0001;
double y_inc = 0.0001;
// struct Complex c = {-0.7, 0.27015};
struct Complex c = {-0.4, 0.6};
double norm_c = norm(c);
double upper = (double)(R * R * (R - 1.0) * (R - 1.0));
assert(upper > norm_c);
for (double y = y_min; y < y_max; y += y_inc) {
for (double x = x_min; x < x_max; x += x_inc) {
struct Complex z = {x, y};
if (is_in_julia_set(z, c, MAX_ITER)) {
printf("#");
} else {
printf(" ");
}
}
printf("\n");
}
}
int main(int argc, char **argv) {
draw_julia_set();
return 0;
}
%% Cell type:markdown id:b15c0557 tags:
# Tutorial Control for Computing
%% Cell type:markdown id:d74f1154 tags:
In this tutorial, we introduce the basic concepts of applying control theory methods to computing systems, or applications.
%% Cell type:markdown id:187c1410-4256-4621-b812-c4de2b41fedf tags:
## Contents
- [Introduction of the System](./01_System.ipynb)
- [The "Bang-Bang" Approach](./02_BangBang.ipynb)
- [Proportional Controller](./03_PController.ipynb)
- [Proportional-Integral Controller](./04_PIController.ipynb)
- [Identification](./05_Identification.ipynb)
- [Exercise: Design of a PI Controller on a Real System](./06_RealSystem.ipynb)
%% Cell type:markdown id:e2114ca5-c4bc-44f4-9eed-ef4ab5447994 tags:
# The System
%% Cell type:code id:2ee6b0b7-8581-44fd-be6d-b747c209b9f4 tags:
``` python
from tuto_control_lib.systems import IntroSystem
from tuto_control_lib.plot import *
import matplotlib.pyplot as plt
import numpy as np
```
%% Cell type:markdown id:54a717f8-7076-4947-b92c-f245f91b72ec tags:
To get familiar with the concepts of Control Theory, we will first use a simulated system to allow for quick iteration.
We will use a real system in the [last section](./06_RealSystem.ipynb).
%% Cell type:markdown id:695e8ac6-da3f-41c5-a5ad-9f560d1b9b7f tags:
The system has the following API:
- a system can be created with `IntroSystem()`
- we can sense the system by running `system.sense()`
- we can apply an input by running `system.apply(INPUT)`
Here is a small example:
%% Cell type:code id:0fe7a0c2-4f8c-492f-90d6-ab6ace7a6941 tags:
``` python
system = IntroSystem()
print(f"Sensor: {system.sense()}")
system.apply(2)
print(f"Sensor: {system.sense()}")
system.apply(1)
print(f"Sensor: {system.sense()}")
```
%% Cell type:markdown id:afc1f6cf-ff70-434f-a364-a5801b8067b2 tags:
Let us perform what is called an open loop experiment.
For this, we apply a constant input and observe the output.
The following code block also illustrates how the code blocks of this tutorial are structured.
%% Cell type:code id:7112c698-b6b5-4d99-bbe5-946e692bf4d8 tags:
``` python
# We define the number of iterations to simulate
max_iter = 100
# We initialize the system
system = IntroSystem()
# This list will gather the values of the sensor to plot them afterwards
y_values = []
# This is the constant value that we will apply to our system.
constant_input = 2

for _ in range(max_iter):
    # For every iteration...
    # ... we read the sensor,
    y = system.sense()
    # and save its value.
    y_values.append(y)
    # We then apply the constant input
    system.apply(constant_input)

# We finally plot the simulation
plot_y(y_values)
```
%% Cell type:markdown id:00025b4b-0452-4294-a87c-7016f63cf472 tags:
Let us now try to apply a staircase input.
This just means that the constant input will take different values over time.
%% Cell type:markdown id:9e4b819a-071f-4e8f-a761-e212833b2a7f tags:
Let us say that we want to increase the input by 2 every 25 iterations.
%% Cell type:code id:1ab13ec7-891f-4483-81a0-adbf2434ba51 tags:
``` python
max_iter = 100
system = IntroSystem()
y_values = []
# This list will contain the values of the input, to plot them
u_values = []
u = 0

for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    u = 2 * (i // 25)
    u_values.append(u)
    system.apply(u)
plot_u_y(u_values, y_values)
```
%% Cell type:markdown id:51979864-268e-432a-891c-e63083b59672 tags:
The goal of control theory is to regulate systems around desired behaviors.
In our case, we would want the system's sensor to be around a specific value.
%% Cell type:markdown id:5ee594a4-012d-4f86-8397-29ed904508f8 tags:
[Back to menu](./00_Main.ipynb) or [Next chapter](./02_BangBang.ipynb)
%% Cell type:markdown id:98f24f94-e1df-4dff-8266-57b3de5f3431 tags:
# The Bang-Bang Approach
%% Cell type:code id:8ffb58c1-62d2-4c90-9b02-db84c416df84 tags:
``` python
from tuto_control_lib.systems import IntroSystem
from tuto_control_lib.plot import plot_u_y
import matplotlib.pyplot as plt
import numpy as np
from statistics import mean
```
%% Cell type:markdown id:fb8193e6-bec3-4cc2-848a-30535df4d159 tags:
One way to regulate the output of a system would be to define two bounds for the system sensor:
- one upper bound
- one lower bound
When the system output is greater than the upper bound, we decrease the input.
And when the system output is lower than the lower bound, we increase the input.
Otherwise, we keep the previous input.
%% Cell type:markdown id:60d2e25c-4264-45f7-89b0-63fbc9e13ff8 tags:
Say that we want to regulate our system around the value 1.
We now have to choose the values of the bounds.
The issue is that there is no protocol to find the values of the bounds and of the increment.
So, we have to proceed by trial-and-error.
Let us take 0.5 as the lower bound and 1.5 as the upper bound.
%% Cell type:code id:62db6d61-c852-43f7-8dff-8b4af2fa93b9 tags:
``` python
system, u, y_values, u_values, max_iter = IntroSystem(), 0, [], [], 100
reference_value = 1
upper_bound = 1.5
lower_bound = 0.5
increment = 0.5

for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    if y < lower_bound:
        u += increment
    elif y > upper_bound:
        u -= increment
    else:
        pass
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:markdown id:6267274e-7d36-42c2-9a4d-f7efcaaf0634 tags:
As we can see, the system is somewhat under control, but oscillates a lot.
%% Cell type:code id:ecdc7121-d253-477d-92dc-4485abd97d29 tags:
``` python
mean_error = mean(map(lambda x: abs(reference_value - x), y_values))
max_overshoot = (max(y_values) - reference_value) / reference_value
print(f"Mean Error: {mean_error}")
print(f"Max. Overshoot: {max_overshoot}")
```
%% Cell type:markdown id:2b58c216-0ef8-4d17-8b35-514974b47570 tags:
<div class="alert alert-info">
Try changing the values of the bounds and the increment to see the behaviour of the system.
</div>
%% Cell type:markdown id:73e94170-8855-4a03-a77b-e5261cc27391 tags:
[Back to menu](./00_Main.ipynb) or [Next chapter](./03_PController.ipynb)
%% Cell type:markdown id:86607676-56c2-4bec-b675-309f8f1a5e85 tags:
# Proportional Controller
%% Cell type:code id:6830d828-6569-409d-aeb6-6fbfac190b3f tags:
``` python
from tuto_control_lib.systems import IntroSystem
from tuto_control_lib.plot import *
import matplotlib.pyplot as plt
import numpy as np
from math import exp
```
%% Cell type:markdown id:028756f1-641f-4c33-9879-cbb53ee0f256 tags:
We have seen that a Bang-Bang solution manages to roughly get the system in the desired state.
However, the lack of protocol and guarantees of this solution limits its adoption on production systems.
In this section, we introduce the most basic controller from Control Theory: the *Proportional Controller*.
The idea of the proportional controller is to have a response proportional to the control error.
The control error is the distance between the desired behaviour and the current state of the system.
The equation of a P Controller is the following:
$$
u(k) = K_p \times e(k) = K_p \times \left(y_{ref} - y(k)\right)
$$
where:
- $K_p$ is the proportional gain of the controller
- $y_{ref}$ is the reference value for our system (i.e. the desired value of the system output)
- $y(k)$ is the system output at iteration $k$
- $e(k)$ is the control error at iteration $k$
- $u(k)$ is the input at iteration $k$
%% Cell type:code id:3db2d938-fe75-40e0-b66a-a8b6b0ce4eaa tags:
``` python
system, u_values, y_values, u, max_iter = IntroSystem(), [], [], 0, 100
reference_value = 1
kp = 3.3
for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    error = reference_value - y
    u = kp * error
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:markdown id:63203ee3-be90-4e9a-92d4-57a08d042631 tags:
As we can see, the system converges, but to a value different from the reference value.
The controller introduces oscillations before converging.
%% Cell type:code id:7a031480-3308-4389-82c2-3b104d9d1f6a tags:
``` python
print(f"Steady state value: {y_values[-1]}\nReference value: {reference_value}")
```
%% Cell type:markdown id:f664e07a-5ea6-4d0f-b826-beb0caeb6c90 tags:
<div class="alert alert-info">
Try changing the values of the proportional gain $K_p$ and the reference value.
</div>
%% Cell type:markdown id:5e4ee85a-2551-4b7d-a30f-528befbfc380 tags:
# Design of a Proportional Controller
%% Cell type:markdown id:49936477-c739-476b-a351-c59ec23f9d68 tags:
To design a Proportional Controller with guarantees, we must have a model of our system.
A model, in the sense of Control Theory, is a relation between the inputs and the outputs.
The general form of a model is the following:
$$
y(k + 1) = \sum_{i = 0}^k a_i y(k - i) + \sum_{i = 0}^k b_i u(k - i)
$$
where:
- $y(k + 1)$ is the next value of the output
- $y(k-i)$ and $u(k-i)$ are previous values of the output and the input
- $a_i$ and $b_i$ are the coefficients of the model ($\forall i, (a_i, b_i) \in \mathbb{R}^2$)
Usually, and to simplify this introduction, we consider *first order models*.
This means that the model only considers the last values of $y$ and $u$ to get the next value of $y$.
$$
y(k + 1) = a y(k) + b u(k)
$$
In this section, we will suppose that we have a first order model whose coefficients we know.
In a [future section](./05_Identification.ipynb), we will look at how to find these coefficients.
%% Cell type:code id:378caaac-80da-423d-be96-fdc0812e7380 tags:
``` python
# Our system
system = IntroSystem()
# The coefficients
a = 0.8
b = 0.5
```
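As a quick sanity check (this cell is not part of the original notebook), we can simulate this first order model in open loop: for a constant input $u$, the output should settle at $\frac{b}{1-a} u$, i.e. $2.5\,u$ with the coefficients above.

``` python
# Sketch under the assumption a = 0.8, b = 0.5 (the values given above):
# simulate y(k+1) = a*y(k) + b*u(k) for a constant input and check the steady state.
a, b = 0.8, 0.5
u = 2
y = 0
y_model = []
for _ in range(50):
    y = a * y + b * u
    y_model.append(y)
print(f"Steady state: {y_model[-1]:.3f} (expected {b * u / (1 - a):.3f})")
```

This steady state gain $\frac{b}{1-a}$ is exactly the quantity we will measure during the identification step.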
%% Cell type:markdown id:7fc2aba3-50a3-4a7d-bdcf-baa02a3d5a45 tags:
## Stability
%% Cell type:markdown id:869237a3-bcc0-43f0-905a-0ef25ed61955 tags:
The pole of the closed-loop transfer function is: $a - b K_p$.
For the closed-loop system to be stable, this pole needs to be in the unit circle: $|a - b K_p| < 1$.
Thus:
$$
\frac{a - 1}{b} < K_p < \frac{a + 1}{b}
$$
In our case:
%% Cell type:code id:ce848920-a4a6-416a-9245-94a1ce6c4bdb tags:
``` python
stability_lower_bound = (a - 1) / b
stability_upper_bound = (a + 1) / b
print(f"{stability_lower_bound} < K_p < {stability_upper_bound}")
```
%% Cell type:markdown id:17658df0-4284-4b46-99fb-ad401f75c2ca tags:
## Precision
%% Cell type:markdown id:35f5b723-9eb7-4314-8fe1-7989b612e1fe tags:
Proportional Controllers are inherently imprecise.
But we can tune their steady state error ($e_{ss}$) relative to the reference value ($r_{ss}$).
$$
\begin{aligned}
e_{ss} &= r_{ss} ( 1 - F_R(1)) \\
&= r_{ss} \left(1 - \frac{b K_p}{1 - (a - b K_p)}\right) < e_{ss}^*
\end{aligned}
$$
Say we want the steady state error to be less than $e_{ss}^*$.
Then,
$$
K_p > \frac{\left(1 - \frac{e_{ss}^*}{r_{ss}}\right)\left(1 - a\right)}{b\frac{e_{ss}^*}{r_{ss}}}
$$
In our case:
%% Cell type:code id:373b93c9-b92a-4f32-bb26-3640a9c05d5f tags:
``` python
r_ss = 1
e_star = 0.15
precision_lower_bound = (1 - e_star/r_ss) * (1 - a)/(b * (e_star/r_ss))
print(f"K_p > {precision_lower_bound}")
```
%% Cell type:markdown id:657451c9-80c4-4a85-9277-965c76969469 tags:
## Settling Time
%% Cell type:markdown id:320e2695-9a0b-4a8d-909b-4385dbebf6af tags:
The settling time, i.e. the time to reach the steady state value, is defined as follows:
$$
k_s \simeq \frac{-4}{\log | a - b K_p| }
$$
Let $k_s^*$ be the desired settling time.
Then:
$$
\frac{a - \exp\left(\frac{-4}{k_s^*}\right)}{b} < K_p < \frac{a + \exp\left(\frac{-4}{k_s^*}\right)}{b}
$$
In our case:
%% Cell type:code id:e1737984-27a8-4a7c-8556-ff99ae1bc519 tags:
``` python
ks_star = 10
settling_time_lower_bound = (a - exp(-4/ks_star)) / b
settling_time_upper_bound = (a + exp(-4/ks_star)) / b
print(f"{settling_time_lower_bound} < K_p < {settling_time_upper_bound}")
```
%% Cell type:markdown id:90158219-f1b9-4ec0-9667-e335d1a02854 tags:
## Maximum Overshoot
%% Cell type:markdown id:a04de440-6087-4f45-a302-02c0f3bfdd20 tags:
The maximum overshoot is the maximum error above the reference value.
It is defined as:
$$
M_p = | a - b K_p|
$$
If $M_p^*$ is the desired maximum overshoot, then:
$$
\frac{a - M_p^*}{b} < K_p < \frac{a + M_p^*}{b}
$$
But we really are only interested in the upper bound.
In our case:
%% Cell type:code id:cd8c0735-9f8d-4cf7-9f1c-536741d98e37 tags:
``` python
mp_star = 0.1
max_overshoot_upper_bound = (a + mp_star) / b
print(f"K_p < {max_overshoot_upper_bound}")
```
%% Cell type:code id:cf1f604a-0dd3-4ba2-9fd3-b22613bc0e86 tags:
``` python
max_y = 15
min_y = -5
fig, ax = plt.subplots()
ax.broken_barh([(stability_lower_bound, stability_upper_bound - stability_lower_bound)], (2.5, 5), facecolors='tab:blue')
ax.broken_barh([(precision_lower_bound, max_y)], (7.5, 5), facecolors='tab:red')
ax.broken_barh([(settling_time_lower_bound, settling_time_upper_bound - settling_time_lower_bound)], (12.5, 5), facecolors='tab:green')
ax.broken_barh([(min_y, max_overshoot_upper_bound - min_y)], (17.5, 5), facecolors='tab:orange')
ax.set_ylim(0, 25)
ax.set_xlim(min_y, max_y)
ax.set_xlabel('Kp')
ax.set_yticks([5, 10, 15, 20], labels=['Stability', 'Precision', 'Settling Time', 'Max. Overshoot'])
ax.grid(True)
plt.show()
```
%% Cell type:markdown id:a837089e-3e92-44d9-b348-25fb882dc553 tags:
As we can see, there is no value of $K_p$ that satisfies all the properties.
<div class="alert alert-danger" role="alert">
The key point is that implementing a Proportional controller requires some <b>trade-off</b>!
</div>
In the example above, the value $K_p = 2.5$ seems to satisfy most of the properties.
%% Cell type:code id:9a161f2f-57a4-49f2-b7db-fcca7eddcb99 tags:
``` python
reference_value = 1
kp = 2.5
y_values, u_values, u, system, max_iter = [], [], 0, IntroSystem(), 20
for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    error = reference_value - y
    u = kp * error
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:markdown id:bdecddd8-813c-4321-9366-0e9bb9c93f6e tags:
We can observe the actual behaviour of the closed loop system and compare it to the desired behavior.
%% Cell type:code id:137ccf61-242f-45ed-be2c-f300d21b5f92 tags:
``` python
e_ss = reference_value - y_values[-1]
max_overshoot = (max(y_values) - y_values[-1]) / y_values[-1]
settling_time = len([x for x in y_values if abs(x - y_values[-1]) > 0.05])
print(f"Precision: {e_ss} -> desired: < {e_star}")
print(f"Settling Time: {settling_time} -> desired: < {ks_star}")
print(f"Max. Overshoot: {max_overshoot} -> desired: < {mp_star}")
```
%% Cell type:markdown id:dd67e21e-0d8c-45c4-ab53-d67ce40771e6 tags:
As expected, the closed loop system overshoots too much, but the other properties are respected.
%% Cell type:markdown id:01c36250-4482-47d9-a1ab-6451f7470ca7 tags:
<div class="alert alert-info" role="alert">
Try to change the requirements on the closed-loop properties to find different values of $K_p$ and plot the system.
</div>
%% Cell type:markdown id:09a14ea2-b496-4d8a-b5d7-155a5b4d60c9 tags:
[Back to menu](./00_Main.ipynb) or [Next chapter](./04_PIController.ipynb)
%% Cell type:markdown id:121c4163-0c06-4c32-a7ba-11cc51acfdff tags:
# Proportional-Integral Controller
%% Cell type:code id:4e8c50d8-d7b2-4d85-aab7-f418755d42da tags:
``` python
from tuto_control_lib.systems import IntroSystem
from tuto_control_lib.plot import *
import matplotlib.pyplot as plt
import numpy as np
from math import exp, log, pi, cos
from statistics import mean
```
%% Cell type:markdown id:2c222167-167e-456d-bddf-4f7bde57d923 tags:
As we have seen before, a Proportional controller is inherently imprecise.
One way to improve the precision of the closed loop system is to add an integral term to the controller.
The integral term aims at canceling the steady state error.
The form of the controller (in discrete time) is the following:
$$
u(k) = K_p e(k) + K_i \sum_{i=0}^k e(i)
$$
We can try to add the $K_i$ term to the previously defined P Controller:
%% Cell type:code id:df65cb57-486d-414d-aae7-e747167d3a91 tags:
``` python
max_iter = 50
reference_value = 1
kp = 2.5
ki = 1.5
y_values, u_values, u, system, integral = [], [], 0, IntroSystem(), 0
for _ in range(max_iter):
    y = system.sense()
    y_values.append(y)
    error = reference_value - y
    integral += error
    u = kp * error + ki * integral
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:markdown id:fb99ce6a-f051-4074-b049-5f5ed3273628 tags:
We can see that the system converges to the reference value!
However, there are some oscillations and overshooting...
%% Cell type:markdown id:cb414899-941e-4652-93c1-df6ab939c6f8 tags:
<div class="alert alert-info" role="alert">
Try to change the values of $K_p$ and $K_i$ to observe the change of behavior.
</div>
%% Cell type:markdown id:dc185bc1-7b31-41df-8dd3-b260aca6097b tags:
# Design of a PI Controller
%% Cell type:markdown id:d8df0233-6f1c-487d-8861-9de5d432aa9a tags:
As for the P Controller, we have to choose the desired closed loop behavior.
In the case of a PI Controller, the precision is handled by the integral term, so we specify the settling time and the maximum overshoot, as for the P Controller.
There are several methods to find the gains of a PI Controller.
In the following we use the *pole placement method*.
The idea is to choose the poles of the closed-loop system to fit the desired behavior.
Without going into too much detail, we give the equations leading to the gains.
Given the desired values for $k_s$ (settling time) and $M_p$ (max. overshoot), we get:
$$
\begin{cases}
K_p = \frac{a - r^2}{b}\\
K_i = \frac{1 - 2 r \cos \theta + r^2}{b}
\end{cases}
$$
with:
- $r = \exp\left(-\frac{4}{k_s}\right)$
- $\theta = \pi \frac{\log r}{\log M_p}$
In our case:
%% Cell type:code id:ca126771-c51e-4b6e-9203-295c275f0d94 tags:
``` python
# The coefficients of our system
a = 0.8
b = 0.5
# Our desired properties
ks = 10
mp = 0.01
r = exp(-4/ks)
theta = pi * log(r) / log(mp)
kp = (a - r * r) / b
ki = (1 - 2 * r * cos(theta) + r * r) / b
print(f"Kp = {kp}\nKi = {ki}")
```
%% Cell type:code id:1ef86c1f-c0cf-4d73-844b-3e6571061f4b tags:
``` python
max_iter = 50
reference_value = 1
y_values, u_values, u, system, integral_error = [], [], 0, IntroSystem(), 0
for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    error = reference_value - y
    integral_error += error
    u = kp * error + ki * integral_error
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:code id:a4ca0de4-079f-4e46-9b8c-704c27e15102 tags:
``` python
e_ss = reference_value - y_values[-1]
max_overshoot = (max(y_values) - y_values[-1]) / y_values[-1]
settling_time = len([x for x in y_values if abs(x - y_values[-1]) > 0.05])
print(f"Precision: {e_ss}")
print(f"Settling Time: {settling_time} -> desired: < {ks}")
print(f"Max. Overshoot: {max_overshoot} -> desired: < {mp}")
```
%% Cell type:markdown id:41159696-e75e-4ae5-8b69-44799bf482d9 tags:
<div class="alert alert-info" role="alert">
Try to change the requirements on the closed-loop properties to find different values of $K_p$ and $K_i$ and plot the system.
</div>
%% Cell type:markdown id:e8e31f05-7d99-4793-801e-51c292c7c8ec tags:
[Back to menu](./00_Main.ipynb) or [Next chapter](./05_Identification.ipynb)
%% Cell type:markdown id:34059152-8e0d-4d75-a1ec-edfab3c25c05 tags:
# Identification
%% Cell type:code id:1655fd96-9ee8-4258-a9fa-34f2b6de8e32 tags:
``` python
from tuto_control_lib.systems import UnknownSystem
from tuto_control_lib.plot import *
import matplotlib.pyplot as plt
import numpy as np
from math import exp, log, pi, cos
from statistics import mean
```
%% Cell type:markdown id:6b3d3141-1e2e-42c0-b822-27c7a0b84471 tags:
For the moment, we have supposed that the model of the system is known (i.e. the coefficients $a$ and $b$).
But in practice, we do not know them.
In this section, we will perform what is called *identification* to get the coefficients of the system model.
The idea of the identification phase is simple: find the relation between the input and the output.
The most basic way to do this is to apply a series of step inputs and observe the output.
In this section, we will use the `UnknownSystem` and try to find its coefficients.
%% Cell type:code id:7e92fbb0-697a-452f-9829-ab0f67a14a89 tags:
``` python
y_values_ident, u_values_ident, u, system, max_iter = [], [], 0, UnknownSystem(), 200

for i in range(max_iter):
    y = system.sense()
    y_values_ident.append(y)
    u = (i + 20) // 20
    system.apply(u)
    u_values_ident.append(u)
plot_u_y(u_values_ident, y_values_ident)
```
%% Cell type:markdown id:161a6257-f786-4437-b264-678feadd1863 tags:
We are looking for an expression for a first order system of the following form:
$$
y(k+1) = a y(k) + b u(k)
$$
In steady state, the input is constant: $u_{ss}$.
Thus:
$$
y_{ss} = a y_{ss} + b u_{ss} \implies \frac{y_{ss}}{u_{ss}} = \frac{b}{1 - a}
$$
In our case:
%% Cell type:code id:7224da8b-7802-40d9-bfec-680bb5225a8d tags:
``` python
previous_u = 1
gain_ss = None
for (u, y) in zip(u_values_ident, y_values_ident):
    if u != previous_u:
        print(f"u: {previous_u} -> {gain_ss}")
        previous_u = u
    else:
        gain_ss = y / u
print(f"u: {previous_u} -> {gain_ss}")
```
%% Cell type:markdown id:b39169cb-e6c8-4f17-ac6b-003b6be751dd tags:
So the static gain is around `0.85`.
%% Cell type:markdown id:3e9f8daf-b9c4-476b-b6f1-c5ea97b74574 tags:
We will perform a least squares regression to get an estimation of the model:
%% Cell type:code id:d7c447ff-390a-4950-80c7-9c53769f651f tags:
``` python
u_values_identification = u_values_ident
y_values_identification = y_values_ident
s1 = sum(y * y for y in y_values_identification)
s2 = sum(u * y for (u, y) in zip(u_values_identification, y_values_identification))
s3 = sum(u * u for u in u_values_identification)
s4 = sum(y * z for (y, z) in zip(y_values_identification[:-1], y_values_identification[1:]))
s5 = sum(u * z for (u, z) in zip(u_values_identification[:-1], y_values_identification[1:]))
a_est = (s3 * s4 - s2 * s5) / (s1 * s3 - s2 * s2)
b_est = (s1 * s5 - s2 * s4) / (s1 * s3 - s2 * s2)
print(f"a: {a_est}, b: {b_est} -> gain: {b_est / (1 - a_est)}")
```
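As a cross-check (this cell is not in the original notebook), the same coefficients can be estimated with `numpy`'s least squares solver, by regressing $y(k+1)$ on $\left(y(k), u(k)\right)$:

``` python
import numpy as np

# Regress y(k+1) on [y(k), u(k)] with ordinary least squares (same data as above).
Y_next = np.array(y_values_identification[1:])
X = np.column_stack([y_values_identification[:-1], u_values_identification[:-1]])
(a_ls, b_ls), *_ = np.linalg.lstsq(X, Y_next, rcond=None)
print(f"a: {a_ls}, b: {b_ls} -> gain: {b_ls / (1 - a_ls)}")
```

Both estimates should be close; the explicit sums above simply spell out the same normal equations.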
%% Cell type:code id:2a1dda22-375b-42aa-bd49-6d0400d7390e tags:
``` python
max_iter = 200
y_values, u_values, u, system, integral = [], [], 0, UnknownSystem(), 0
model = []
y_model = 0

for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    u = (i + 20) // 20
    system.apply(u)
    u_values.append(u)
    y_model = a_est * y_model + b_est * u
    model.append(y_model)
plot_model_compa(y_values, model)
```
%% Cell type:markdown id:5fd48bfc-8960-46e3-b04e-fbb31ecaac6c tags:
# Designing a PI Controller
%% Cell type:code id:21f1a25d-7366-446c-83f8-946fa4257f87 tags:
``` python
ks = 10
mp = 0.05
r = exp(-4/ks)
theta = pi * log(r) / log(mp)
kp = (a_est - r * r) / b_est
ki = (1 - 2 * r * cos(theta) + r * r) / b_est
print(f"Kp = {kp}\nKi = {ki}")
```
%% Cell type:code id:160523c3-f950-4722-8672-a38ac6f17e97 tags:
``` python
reference_value = 1
y_values, u_values, u, system, integral_error, max_iter = [], [], 0, UnknownSystem(), 0, 50
for i in range(max_iter):
    y = system.sense()
    y_values.append(y)
    error = reference_value - y
    integral_error += error
    u = kp * error + ki * integral_error
    system.apply(u)
    u_values.append(u)
plot_u_y(u_values, y_values, reference_value)
```
%% Cell type:code id:2759322c-239d-4376-a70a-e9fc6346f0e8 tags:
``` python
e_ss = reference_value - y_values[-1]
max_overshoot = (max(y_values) - y_values[-1]) / y_values[-1]
settling_time = len([x for x in y_values if abs(x - y_values[-1]) > 0.05])
print(f"Precision: {e_ss}")
print(f"Settling Time: {settling_time} -> desired: < {ks}")
print(f"Max. Overshoot: {max_overshoot} -> desired: < {mp}")
```
%% Cell type:markdown id:d916d270-f6c7-4f0c-9f19-869a9c1aac84 tags:
[Back to menu](./00_Main.ipynb) or [Next chapter](./06_RealSystem.ipynb)
%% Cell type:markdown id:8c5592b6-7270-437d-9f89-96dc4a60528d tags:
# 'Real' System
%% Cell type:markdown id:e38ada7d-aae2-4ff0-bd34-20d4c1bed366 tags:
We provide a semi-real system.
The system is as follows:
We want to compute an estimation of $\pi$.
One way to do this is to use Monte-Carlo simulations.
The idea of Monte-Carlo simulations is to execute **a lot of small and independent simulations** and compute the final result based on the results of the simulations.
In our case, each simulation draws a random number $x$ in $[-1, 1]$, and then computes (in a very inefficient way) $\sqrt{1 - x^2}$.
The final result is the average of the simulation results multiplied by the length of the interval, which is an approximation of $\int_{-1}^1 \sqrt{1-x^2}dx = \frac{\pi}{2}$.
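For intuition only, here is a minimal Python sketch of this estimate (the real system performs the equivalent computation in C, spread over several threads):

``` python
import random
from math import sqrt

n = 100_000
# Each simulation draws x uniformly in [-1, 1] and evaluates sqrt(1 - x^2).
total = sum(sqrt(1 - random.uniform(-1, 1) ** 2) for _ in range(n))
# The mean value times the interval length (2) approximates the integral, i.e. pi / 2.
half_pi = 2 * total / n
print("pi ~", 2 * half_pi)
```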
Our sensor is the `loadavg` of the machine. The `loadavg` is a metric representing the CPU utilization.
Our actuator is the number of threads executing simulations in parallel (between 0 and 8).
> **Our control objective is to control the `loadavg` metric around a given value by adapting the number of threads executing simulations.**
You can run the system with the following `docker` command:
```sh
docker run --privileged -it -v $(pwd):/data registry.gitlab.inria.fr/control-for-computing/tutorial/system:v0.0 tuto-ctrl main.lua 1000000
```
The `main.lua` file is the file containing the controller code, and the last parameter is the total number of iterations to do.
%% Cell type:markdown id:8a1ea320-905c-4784-94ca-71c622046559 tags:
<div class="alert alert-info" role="alert">
Your tasks:
<ol>
<li>Play with the system: Try different constant inputs and see the output</li>
<li>Assuming the underlying model is a first order model: Perform the identification</li>
<li>Design a PI Controller for this system</li>
<li>Introduce perturbations: At some point in your experiment, run `yes` in another terminal to act as a disturbance, and see how your controller reacts.</li>
</ol>
</div>
%% Cell type:markdown id:629824b3-eeb5-40c2-a55b-922def8fdfbd tags:
## Why `lua`?
Because it is a simple, small language that integrates with `C` easily!
Instead of giving you the source code and asking you to write `C` code to implement the controller, we can just write the controller in `lua` and pass the `lua` file as an argument to the `C` binary, which loads it.
You can find a `lua` cheat sheet [here](https://devhints.io/lua).
%% Cell type:markdown id:e62617e9-e542-4398-867c-57c0984c2e99 tags:
We define here a Proportional Controller:
```lua
-- main.lua file
-- Do not worry about this part
Controller = {}
Controller.__index = Controller

function Controller:new()
    -- Here we write the fields of the object.
    -- For a P controller we need a proportional gain (kp)
    -- and the reference value (ref)
    local ret = {kp = 0.5, ref = 2.0}
    setmetatable(ret, Controller)
    return ret
end

function Controller:ctrl(sensor)
    -- This is the main function of the controller.
    -- It gets the sensor value from the system,
    -- and must return the next input.
    -- First we compute the control error
    local err = self.ref - sensor
    -- Then we compute the next u
    local u = self.kp * err
    -- We make sure the value of u makes sense for our system
    if u < 0 then
        u = 0
    end
    -- We print a line with the different metrics
    print("sensor: " .. sensor .. ", ref: " .. self.ref .. ", actuator: " .. u .. ", nb threads: " .. math.floor(u))
    -- We finally return the value for the next input
    return math.floor(u)
end

return Controller
```
%% Cell type:markdown id:99064358-2958-4ec7-aed1-cf0fc9215fce tags:
[Back to menu](./00_Main.ipynb)
@@ -4,35 +4,21 @@ Link to this page: [https://tinyurl.com/CtrlComputing](https://tinyurl.com/CtrlComputing)
 ## Introduction
-This tutorial aims at introducing the notions and tool of the Control-Theory field to computer scientists.
-The tutorial is composed of two parts:
+This tutorial aims at introducing the tools and notions of the [Control-Theory field](https://en.wikipedia.org/wiki/Control_theory) to computer scientists, and relies on [Jupyter notebooks](https://jupyter.org/), and is composed of two parts:
 - In the first part, attendees get familiar with the concepts, tools, and methodology of Control-Theory.
-This part is done by experimenting on a simulated system with Jupyter notebooks.
 - In the second part, attendees are given a pseudo-realistic system and have to design a controller to regulate its behavior.
 ## Requirements
-- a machine with `docker` installed.
-- basic `python` knowledge.
-- basic math knowledge is a plus.
-- basic `lua` knowledge is a plus.
-## Installation
-The tutorial is contained in a single docker image.
-Start the tutorial by running:
-```bash
-docker run -it -p 8888:8888 -v $(pwd):/tuto/data registry.gitlab.inria.fr/control-for-computing/tutorial/tuto:v0.0
-```
-This should start a `JupyterLab` instance (you may have to click the link in the terminal).
-Once in `JupyterLab`, begin with the file named `00_Main.ipynb`.
+- a recent web browser (Firefox 90+, Chromium 89+)
+- basic `python` knowledge
+- basic math knowledge is a plus
+<h1 align="center"><b>Start the tutorial <a href="https://control-for-computing.gitlabpages.inria.fr/jupyter/">here</a></b></h1>
 # Motivation
-Computing systems are getting more and more complex.
+## Motivation of Control for Computing
+Computing systems are getting more and more complex.
 The software stacks are growing, and are executed on top of complex hardware.
-The behavior of such systems is thus becoming extremely difficult to model correctly.
+The behavior of applications is thus becoming extremely difficult to predict, or model, correctly.
+We need regulation to provide *Quality-of-Service* guarantees.
-The usual approach is to have a theoretical model of the system and design complex algorithm based on this model.
+One approach is to have a theoretical model of the system and design complex algorithm based on this model.
-This approach is limited by the quality of the model.
+Such an approach is limited by the quality of the model.
 Modelling all the possible cases, and behaviors is tedious and error-prone.
-TODO: TAKE AN EXAMPLE OF SYSTEM
-A different approach to regulate the behavior of a computing system is to take measurements of the system metrics, and adapt the input based on these measurements.
+A different approach to regulate the behavior of a computing system is to *periodically* take measurements of the system metrics, and adapt the input based on these measurements.
+This approach is called *closed-loop*, and this is the interest of the *Autonomic Computing* community.
-There are several ways to adapt the input of the system.
-One can use ad-hoc solutions based on arbitrary rules or IA for example.
-These solutions do not provide any proven guarantees on the closed-loop system.
-On the other hand, the *Control Theory* field has been applying math-proven methods to closed-loop physical systems for decades.
-But the application of Control Theory to computing systems is only quite recent.
-One example is the one described by Cerf et al. in [Sustaining performance while reducing energy consumption: a control theory approach](https://hal.inria.fr/hal-03259316).
-The motivation is the following:
-HPC applications are a succession of iterations that are composed of phases.
-These phases can be CPU-intensive, IO-intensive, memory-intensive, etc.
-During memory-intensive phases, for example, it is useless for the CPU to be at maximum frequency as it will be idle.
-Thus, one could decrease the CPU frequency of the compute node with RAPL technology to reduce its energy consumption.
-This decrease might introduce some overhead in the execution time of the application.
-The aforementioned paper performs an online regulation of compute node CPU frequency based on an accepted degradation given by the user.
-The intuition is the following:
-If the application is running slower than desired (too much degradation), the CPU frequency is increased to meet user requirements.
-If the application is running faster than desired (too small degradation), the CPU frequency is decreased to reduce energy consumption.
-This regulation was done using Control Theory tools and was showed to lead to significant energy saving with 10% degradation levels.
-One classical example is a *heating device*.
-The user defines the temperature they want their room to be at.
-The heating device will take a measurement of the temperature of the room.
-If the temperature is lower than the desired temperature, the heating device will increase the power of its resistance.
-If the temperature is greater than the desired temperature, the heating device must stop heating, and will thus decrease the power of its resistance.
-This approach is called **closed-loop**.
-Indeed, the system takes a decision based on a sensor, which will impact the next measure of the sensor and thus the next decision.
-There are several ways to "close the loop" in a computing system.
-- AI
-- ad-hoc solutions
-- control theory
+## Motivation of this tutorial
+We believe that numerous computing systems or applications could benefit from feedback loops architecture,
+and that tools from Control Theory provide interesting guarantees compared to other strategies (ad-hoc, AI, etc.).
+This tutorial aims at introducing the attendees with Control Theory, and present the methodology through a practical example.
+At the end of the tutorial, attendees will have:
+- Learned about Control Theory motivations and tools
+- Identified a model of a system
+- Designed a Proportional Controller, and understood its limitations
+- Designed a more complex controller: a Proportional-Integral Controller
CC=gcc
LUA_PATH=/nix/store/dqlpbnddi44921ffq6q7xhzswhyvha0y-lua-5.4.3/
FLAGS=-fPIC -I${LUA_PATH}/include -llua -lpthread
OBJ=monte_carlo.o
all: tuto-ctrl tuto-ctrl-iter
install:
	mkdir -p ./bin
	cp tuto-ctrl ./bin
	cp tuto-ctrl-iter ./bin

%.o: %.c %.h
	$(CC) -c $<

tuto-ctrl: main.c ${OBJ}
	$(CC) $(FLAGS) -D THREADS -o $@ main.c ${OBJ}

tuto-ctrl-iter: main.c ${OBJ}
	$(CC) $(FLAGS) -o $@ main.c ${OBJ}
#ifndef _CONSTANTS_H_
#define _CONSTANTS_H_
#define N 500000
#define ITERS 100
#define MAX_THREADS 8
#define SLEEP_TIME 5
#define A -1
#define B 1
#define MAX_ITERS 100000
#endif
#include "log_thread.h"
#include <lauxlib.h>
#include <lua.h>
#include <lualib.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include "constants.h"
double get_loadavg() {
double loadavg[3];
int result = getloadavg(loadavg, 3);
double load = loadavg[0];
return load;
}
void _loop_function(lua_State* L, mutex_nb* mnb) {
lua_getfield(L, -1, "ctrl");
lua_pushvalue(L, -2);
lua_pushnumber(L, get_loadavg());
lua_pcall(L, 2, 1, 0);
int isnum;
int n = lua_tointegerx(L, -1, &isnum);
lua_remove(L, -1);
pthread_mutex_lock(&mnb->mutex);
mnb->data = n;
pthread_mutex_unlock(&mnb->mutex);
}
void create_lua_ctrlr(lua_State* L) {
lua_getfield(L, -1, "new");
assert(lua_isfunction(L, -1));
lua_insert(L, -2);
int res = lua_pcall(L, 1, 2, 0);
assert(!res);
lua_pop(L, 1);
}
void* load_logger(void* args) {
struct logger_args* myargs = (struct logger_args*)args;
lua_State* L = (lua_State*)(myargs->L);
mutex_nb* mnb = (mutex_nb*)(myargs->mnb);
create_lua_ctrlr(L);
while (1) {
_loop_function(L, mnb);
sleep(SLEEP_TIME);
}
}
#ifndef _LOG_THREAD_H_
#define _LOG_THREAD_H_
#include <assert.h>
#include <lauxlib.h>
#include <lua.h>
#include <lualib.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include "constants.h"
double get_loadavg();
void _loop_function(lua_State* L, mutex_nb* mnb);
struct logger_args {
lua_State* L;
mutex_nb* mnb;
};
void create_lua_ctrlr(lua_State* L);
void* load_logger(void* args);
#endif
#include <assert.h>
#include <lauxlib.h>
#include <lua.h>
#include <lualib.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <time.h>
#include "constants.h"
#include "monte_carlo.h"
pthread_mutex_t mutex_results;
pthread_mutex_t mutex_actuator;
int iterations_done = 0;
double sum_simulations = 0.0;
int actuator = 1;
#ifdef THREADS
#define ACTUATOR_IS_THREADS 1
#else
#define ACTUATOR_IS_THREADS 0
#endif
void* pget_pi(void* arg) {
int iterations = ITERS;
#if !ACTUATOR_IS_THREADS
pthread_mutex_lock(&mutex_actuator);
iterations = actuator;
pthread_mutex_unlock(&mutex_actuator);
#endif
double pi = get_pi(iterations);
pthread_mutex_lock(&mutex_results);
sum_simulations += pi;
iterations_done += iterations;
pthread_mutex_unlock(&mutex_results);
pthread_exit(NULL);
}
void start_threads(pthread_t* threads) {
int nb_threads = 4;
#if ACTUATOR_IS_THREADS
pthread_mutex_lock(&mutex_actuator);
nb_threads = actuator;
pthread_mutex_unlock(&mutex_actuator);
#endif
// printf("Starting %d threads\n", nb_threads);
int rc;
for (int i = 0; i < nb_threads; i++) {
rc = pthread_create(&threads[i], NULL, pget_pi, NULL);
assert(!rc);
}
}
void stop_threads(pthread_t* threads) {
int nb_threads = 4;
#if ACTUATOR_IS_THREADS
pthread_mutex_lock(&mutex_actuator);
nb_threads = actuator;
pthread_mutex_unlock(&mutex_actuator);
#endif
for (int i = 0; i < nb_threads; i++) {
pthread_join(threads[i], NULL);
}
}
double get_loadavg() {
double loadavg[3];
int result = getloadavg(loadavg, 3);
double load = loadavg[0];
return load;
}
void _loop_function(lua_State* L) {
lua_getfield(L, -1, "ctrl");
lua_pushvalue(L, -2);
lua_pushnumber(L, get_loadavg());
lua_pcall(L, 2, 1, 0);
int isnum;
int n = lua_tointegerx(L, -1, &isnum);
lua_remove(L, -1);
pthread_mutex_lock(&mutex_actuator);
actuator = n;
pthread_mutex_unlock(&mutex_actuator);
}
void create_lua_ctrlr(lua_State* L) {
lua_getfield(L, -1, "new");
// assert(lua_isfunction(L, -1));
int res = lua_isfunction(L, -1);
assert(res == 1);
lua_insert(L, -2);
res = lua_pcall(L, 1, 2, 0);
assert(!res);
lua_pop(L, 1);
}
void* load_logger(void* args) {
lua_State* L = (lua_State*)(args);
create_lua_ctrlr(L);
while (1) {
_loop_function(L);
sleep(SLEEP_TIME);
}
}
int main(int argc, char** argv) {
int res = pthread_mutex_init(&(mutex_results), NULL);
assert(!res);
res = pthread_mutex_init(&(mutex_actuator), NULL);
assert(!res);
struct rlimit plop;
plop.rlim_cur = 62678;
plop.rlim_max = 62678;
struct rlimit plap;
setrlimit(RLIMIT_NPROC, &plop);
getrlimit(RLIMIT_NPROC, &plap);
char* myfile = argv[1];
lua_State* L = luaL_newstate();
luaL_openlibs(L);
luaL_dofile(L, myfile);
pthread_t log_thread;
int rc = pthread_create(&log_thread, NULL, load_logger, (void*)L);
assert(!rc);
int total_nb_iterations = atoi(argv[2]);
int current_nb_iterations = 0;
pthread_t* threads = malloc(sizeof(pthread_t) * MAX_THREADS);
do {
start_threads(threads);
stop_threads(threads);
pthread_mutex_lock(&mutex_results);
current_nb_iterations = iterations_done;
pthread_mutex_unlock(&mutex_results);
} while (current_nb_iterations < total_nb_iterations);
// for (int j = 0; j < MAX_ITERS; j++) {
// start_threads(threads);
// stop_threads(threads);
// }
pthread_mutex_lock(&mutex_results);
double half_pi = sum_simulations * (B - A) / iterations_done;
printf("pi = %g\n", half_pi * 2);
pthread_mutex_unlock(&mutex_results);
lua_close(L);
pthread_mutex_destroy(&mutex_results);
pthread_mutex_destroy(&mutex_actuator);
}