Compare revisions — discovery/enoslib

Commits on Source (830)
Showing changes with 1570 additions and 616 deletions
[bumpversion]
current_version = 6.2.0
current_version = 10.0.1
commit = true
tag = true
[bumpversion:file:setup.cfg]
[bumpversion:file:enoslib/version.txt]
# tmp dir created here and there
.ipynb_checkpoints
__enoslib*
utest.xml
/enos_*
cachedir
@@ -5,17 +5,123 @@ workflow:
when: never
- if: $CI_COMMIT_REF_NAME =~ /dev\// && $CI_COMMIT_TITLE =~ /WIP/
when: never
- if: $CI_MERGE_REQUEST_ID
when: never
- when: always
stages:
- test
- functionnal
- package
- publish
- precheck
- invivog5k-deb11
- invivog5k-deb12
- test
- functional
- package
- publish
variables:
DEBIAN_FRONTEND: noninteractive
TZ: Europe/Paris
GIT_SUBMODULE_STRATEGY: normal
TOX_PACKAGE: "tox<4"
FUNCTIONAL_TESTS: test_static_run_command.py test_static_gather_facts.py test_static_play_on.py test_static_run_ansible.py test_static_tasks.py test_sync_info.py test_dstat.py
###############################################################################
#
# PRECHECK STAGE
#
###############################################################################
precheck:
image: python:3.13
stage: precheck
tags: [ci.inria.fr]
script:
- pip install pre-commit
- pre-commit run --all-files
###############################################################################
#
# G5K STAGE
#
###############################################################################
test_invivo_g5k_1$debian11$: &test_invivo_g5k_1
# stick to the distribution we'll find on g5k frontends
image: debian:11
stage: invivog5k-deb11
tags: [ci.inria.fr]
# Always run on tagged commits, and also allow manual runs at any time.
# In all cases, don't fail the pipeline, this is mostly informative.
rules:
- if: $CI_COMMIT_TAG
when: on_success
allow_failure: true
- when: manual
allow_failure: true
script:
- env
- apt update
- apt install -y python3 python3-pip python3-venv iproute2 cmake zlib1g-dev libssl-dev libffi-dev ssh
- ip a
- python3 -m venv /venv
- /venv/bin/python3 -m pip install -e .
- mkdir -p ~/.ssh
- rm -f ~/.ssh/*
- cat $PRIVATE_KEY > ~/.ssh/id_rsa
- cat $PUBLIC_KEY > ~/.ssh/id_rsa.pub
- chmod 600 ~/.ssh/id_rsa
- cat $PYTHON_G5K > ~/.python-grid5000.yaml
- cd docs/tutorials/
- /venv/bin/python3 grid5000/tuto_grid5000_commands.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_commands.py
- /venv/bin/python3 grid5000/tuto_grid5000_deploy.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_deploy.py
- /venv/bin/python3 grid5000/tuto_grid5000_deploy_centos.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_deploy_centos.py
- /venv/bin/python3 grid5000/tuto_grid5000_reload.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_reload.py
- /venv/bin/python3 grid5000/tuto_grid5000_docker_nvidia.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_docker_nvidia.py
- /venv/bin/python3 grid5000/tuto_grid5000_monitoring_service.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_monitoring_service.py
# Second batch of tests to parallelize a bit
test_invivo_g5k_2$debian11$: &test_invivo_g5k_2
# stick to the distribution we'll find on g5k frontends
image: debian:11
stage: invivog5k-deb11
tags: [ci.inria.fr]
# Always run on tagged commits, and also allow manual runs at any time.
# In all cases, don't fail the pipeline, this is mostly informative.
rules:
- if: $CI_COMMIT_TAG
when: on_success
allow_failure: true
- when: manual
allow_failure: true
script:
- env
- apt update
- apt install -y python3 python3-pip python3-venv iproute2 cmake zlib1g-dev libssl-dev libffi-dev ssh
- ip a
- python3 -m venv /venv
- /venv/bin/python3 -m pip install -e .
- mkdir -p ~/.ssh
- rm -f ~/.ssh/*
- cat $PRIVATE_KEY > ~/.ssh/id_rsa
- cat $PUBLIC_KEY > ~/.ssh/id_rsa.pub
- chmod 600 ~/.ssh/id_rsa
- cat $PYTHON_G5K > ~/.python-grid5000.yaml
- cd docs/tutorials/
- /venv/bin/python3 vmong5k/tuto_vmong5k_home.py || /venv/bin/python3 cleanup_g5k_job.py tuto_vmong5k_home.py
- /venv/bin/python3 grid5000/tuto_grid5000_multisites.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_multisites.py
- /venv/bin/python3 grid5000/tuto_grid5000_docker.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_docker.py
- /venv/bin/python3 grid5000/tuto_grid5000_destroy.py || /venv/bin/python3 cleanup_g5k_job.py tuto_grid5000_destroy.py
# Run same tests on Debian 12 (newer Python version, allows newer Ansible versions)
# We need to run them in a different stage, otherwise the common OAR job names would cause interference.
test_invivo_g5k_1$debian12$:
<<: *test_invivo_g5k_1
stage: invivog5k-deb12
image: debian:12
test_invivo_g5k_2$debian12$:
<<: *test_invivo_g5k_2
stage: invivog5k-deb12
image: debian:12
###############################################################################
#
@@ -23,201 +129,259 @@ variables:
#
###############################################################################
python3.7:
image: python:3.7
python3.9:
image: python:3.9
stage: test
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
script:
- pip install tox
- tox -e py37
coverage: /^TOTAL\s+\d+\s+\d+\s+(\d+\%)$/
- pip install $TOX_PACKAGE
- tox -e py39
python3.8:
image: python:3.8
python3.10:
image: python:3.10
stage: test
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
script:
- pip install tox
- tox -e py38
- pip install $TOX_PACKAGE
- tox -e py310
python3.11:
image: python:3.11
stage: test
tags: [ci.inria.fr]
script:
- pip install $TOX_PACKAGE
- tox -e py311
python3.12:
image: python:3.12
stage: test
tags: [ci.inria.fr]
script:
- pip install $TOX_PACKAGE
- tox -e py312
python3.13:
image: python:3.13
stage: test
tags: [ci.inria.fr]
script:
- pip install $TOX_PACKAGE
- tox -e py313
coverage: /^TOTAL\s+\d+\s+\d+\s+(\d+\%)$/
artifacts:
when: always
paths:
- htmlcov
reports:
junit: utest.xml
coverage_report:
coverage_format: cobertura
path: cov.xml
pep8:
image: python:3.7
image: python:3.13
stage: test
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
script:
- pip install tox
- tox -e pep8
- pip install $TOX_PACKAGE
- tox -e pep8
typecheck:
image: python:3.7
pylint:
image: python:3.13
stage: test
allow_failure: true
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
script:
- pip install $TOX_PACKAGE
- tox -e pylint
typecheck-3.9:
image: python:3.9
stage: test
tags: [ci.inria.fr]
script:
- pip install $TOX_PACKAGE
- tox -e typecheck
typecheck-3.13:
image: python:3.13
stage: test
tags: [ci.inria.fr]
script:
- pip install tox
- tox -e typecheck
- pip install $TOX_PACKAGE
- tox -e typecheck
sonar:
image: python:3.7-buster
image: python:3.10-buster
stage: test
allow_failure: true
only:
- tags
tags:
- large
- qlf-ci.inria.fr
- large
- ci.inria.fr
script:
- pip install -e .
- apt update
- apt install -y wget unzip openjdk-11-jre
- pip install pylint
- wget https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.4.0.2170-linux.zip
- unzip sonar-scanner-cli-4.4.0.2170-linux.zip
- mv sonar-scanner-4.4.0.2170-linux sonar
- wget --no-check-certificate https://sonarqube.inria.fr/static/digicertca.crt
- keytool -import -alias inria -storepass "changeit" -keystore $(pwd)/sonar/jre/lib/security/cacerts -file digicertca.crt
- sonar/bin/sonar-scanner -X -Dsonar.login=$SONAR_LOGIN
artifacts:
paths:
- ./utest.xml
- ./coverage.xml
- pip install -e .
- apt update
- apt install -y wget unzip openjdk-11-jre-headless --no-install-recommends
- pip install pylint
- wget https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.4.0.2170-linux.zip
- unzip sonar-scanner-cli-4.4.0.2170-linux.zip
- mv sonar-scanner-4.4.0.2170-linux sonar
- wget --no-check-certificate https://sonarqube.inria.fr/static/digicertca.crt
- keytool -import -alias inria -storepass "changeit" -keystore $(pwd)/sonar/jre/lib/security/cacerts -file digicertca.crt
- pip install $TOX_PACKAGE && tox -e py310
- sonar/bin/sonar-scanner -X -Dsonar.login=$SONAR_LOGIN
doc:
image: python:3.7
image: python:3.10
stage: test
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
script:
- apt update
- apt install -y pandoc
- pip install tox
- tox -e docs
- apt update
- apt install -y pandoc
- pip install $TOX_PACKAGE
- tox -e docs
artifacts:
paths:
- docs/_build/html
- docs/_build/html
###############################################################################
#
# FUNCTIONNAL STAGE
# FUNCTIONAL STAGE
#
###############################################################################
## Debian 10
## TEST VARIOUS ANSIBLE VERSIONS
test_enoslib-ansible_7: &enoslib-ansible
image: debian:11
stage: functional
tags: [ci.inria.fr]
variables:
ENOSLIB_ANSIBLE_VERSION: ">=7,<8"
script:
- |
apt update
# procps is required for "kill" for the dstat test
apt install --no-install-recommends -y python3 python3-pip python3-venv iproute2 procps
ip a
# Create virtualenv
python3 -m venv /tmp/venv
. /tmp/venv/bin/activate
python3 -m pip install -U pip wheel
# Test several versions of ansible
# note that
# python3 -m pip uninstall enoslib-ansible
# python3 -m pip install ansible${ENOSLIB_ANSIBLE_VERSION} should work too!
# but this will install all the ansible_collections shipped with the ansible package
python3 -m pip install enoslib-ansible${ENOSLIB_ANSIBLE_VERSION}
# test_dstat needs pandas
python3 -m pip install -e .[analysis]
cd enoslib/tests/functional
# Make sure we perform all tests and then display a summary
failures=""
retcode=0
for test in ${FUNCTIONAL_TESTS}
do
python3 ${test} || { retcode=$?; failures="$failures $test"; }
done
[ -n "$failures" ] && echo "FAILED TESTS: $failures"
exit $retcode
test_enoslib-ansible_6:
<<: *enoslib-ansible
variables:
ENOSLIB_ANSIBLE_VERSION: ">=6,<7"
test_enoslib-ansible_5:
<<: *enoslib-ansible
variables:
ENOSLIB_ANSIBLE_VERSION: ">=5,<6"
test_enoslib-ansible_4:
<<: *enoslib-ansible
variables:
ENOSLIB_ANSIBLE_VERSION: ">=4,<5"
test_enoslib-ansible_8:
<<: *enoslib-ansible
variables:
ENOSLIB_ANSIBLE_VERSION: ">=8,<9"
test_enoslib-ansible_9:
<<: *enoslib-ansible
# ansible 9+ only works with python >= 3.10
image: debian:12
variables:
ENOSLIB_ANSIBLE_VERSION: ">=9,<10"
test_enoslib-ansible_10:
<<: *enoslib-ansible
# ansible 9+ only works with python >= 3.10
image: debian:12
variables:
ENOSLIB_ANSIBLE_VERSION: ">=10,<11"
## TEST AGAINST DIFFERENT DISTROS
#-----------------------------------------------
test_static_run_command$debian10$: &debian10
image: debian:10
stage: functionnal
tags: [qlf-ci.inria.fr]
test_functional$debian12$: &debian-base
image: debian:12
stage: functional
tags: [ci.inria.fr]
script:
- env
- apt update
- apt install -y python3 python3-pip iproute2 cmake zlib1g-dev libssl-dev
- ip a
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- pip3 install -e .
- cd enoslib/tests/functionnal
- python3 ${CI_JOB_NAME//$*$/}.py
test_static_gather_facts$debian10$:
<<: *debian10
test_static_play_on$debian10$:
<<: *debian10
test_static_run_ansible$debian10$:
<<: *debian10
test_monitoring_service$debian10$:
<<: *debian10
test_static_tasks$debian10$:
<<: *debian10
test_discover_networks$debian10$:
<<: *debian10
test_dstat$debian10$:
<<: *debian10
## debian latest
- |
apt update
# procps is required for "kill" for the dstat test
apt install --no-install-recommends -y python3 python3-pip python3-venv iproute2 procps
ip a
# Create virtualenv
python3 -m venv /tmp/venv
. /tmp/venv/bin/activate
python3 -m pip install -U pip wheel
# test_dstat needs pandas
python3 -m pip install -e .[analysis]
cd enoslib/tests/functional
# Make sure we perform all tests and then display a summary
failures=""
retcode=0
for test in ${FUNCTIONAL_TESTS}
do
python3 ${test} || { retcode=$?; failures="$failures $(basename $test)"; }
done
[ -n "$failures" ] && echo "FAILED TESTS: $failures"
exit $retcode
## debian 11
#-----------------------------------------------
test_static_run_command$debian$: &debian
<<: *debian10
image: debian:latest
test_static_gather_facts$debian$:
<<: *debian
test_static_play_on$debian$:
<<: *debian
test_static_run_ansible$debian$:
<<: *debian
test_monitoring_service$debian$:
<<: *debian
test_functional$debian11$: &debian11
<<: *debian-base
image: debian:11
test_static_tasks$debian$:
<<: *debian
test_discover_networks$debian$:
<<: *debian
test_dstat$debian$:
<<: *debian
## ubuntu 1804
## debian testing
#-----------------------------------------------
test_static_run_command$ubuntu1804$: &ubuntu2004
<<: *debian10
image: ubuntu:20.04
test_static_gather_facts$ubuntu1804$:
<<: *ubuntu2004
test_static_play_on$ubuntu1804$:
<<: *ubuntu2004
test_static_run_ansible$ubuntu1804$:
<<: *ubuntu2004
test_monitoring_service$ubuntu2004$:
<<: *ubuntu2004
test_static_tasks$ubuntu2004$:
<<: *ubuntu2004
test_discover_networks$debian$:
<<: *ubuntu2004
test_functional$debiantesting$: &debiantesting
<<: *debian-base
image: debian:testing
allow_failure: true
test_dstat$debian$:
<<: *ubuntu2004
## ubuntu latest
## ubuntu 22.04
#-----------------------------------------------
test_static_run_command$ubuntu$: &ubuntu
<<: *debian10
image: ubuntu:latest
test_functional$ubuntu2204$: &ubuntu2204
<<: *debian-base
image: ubuntu:22.04
test_static_gather_facts$ubuntu$:
<<: *ubuntu
test_static_play_on$ubuntu$:
<<: *ubuntu
test_static_run_ansible$ubuntu$:
<<: *ubuntu
test_monitoring_service$ubuntu$:
<<: *ubuntu
test_static_tasks$ubuntu$:
<<: *ubuntu
test_discover_networks$ubuntu$:
<<: *ubuntu
test_dstat$ubuntu$:
<<: *ubuntu
## ubuntu 24.04
#-----------------------------------------------
test_functional$ubuntu2404$: &ubuntu2404
<<: *debian-base
image: ubuntu:24.04
###############################################################################
#
@@ -227,17 +391,16 @@ test_dstat$ubuntu$:
#### Entering the release zone
package:
image: python:3.7
image: python:3.9
stage: package
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
only:
- tags
- tags
script:
- python setup.py bdist_wheel
- python setup.py bdist_wheel
artifacts:
paths:
- dist/
- dist/
###############################################################################
#
@@ -248,27 +411,27 @@ package:
pages:
# when: manual
stage: publish
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
only:
- tags
- tags
dependencies:
- doc
- doc
script:
- mkdir -p public/
- cp -r docs/_build/html/* public/
- mkdir -p public/
- cp -r docs/_build/html/* public/
artifacts:
paths:
- public
- public
pypi:
image: python:3.7
image: python:3.9
stage: publish
tags: [qlf-ci.inria.fr]
tags: [ci.inria.fr]
only:
- tags
- tags
dependencies:
- package
- package
script:
- pip install twine
# credentials are set in the env by gitlab
- twine upload dist/* --verbose
- pip install twine
# credentials are set in the env by gitlab
- twine upload dist/* --verbose
[submodule "docs/jupyter"]
path = docs/jupyter
url = https://gitlab.inria.fr/discovery/enoslib-tutorials.git
exclude: "enoslib/service/skydive/skydive*"
repos:
- repo: https://github.com/psf/black
rev: 24.10.0
hooks:
- id: black
- repo: https://github.com/PyCQA/isort.git
rev: 5.13.2
hooks:
- id: isort
args: [ "--profile", "black" ]
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
args: ["--ignore=E203,W503,E704"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-ast
- id: check-builtin-literals
- id: check-case-conflict
- id: check-merge-conflict
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/asottile/pyupgrade
rev: v3.19.1
hooks:
- id: pyupgrade
args: [--py38-plus]
# Contributing
Contributions are welcome at [https://gitlab.inria.fr/discovery/enoslib](https://gitlab.inria.fr/discovery/enoslib)!
## Tools
### pre-commit
```sh
# install pre-commit
python -m pip install -U pre-commit
# install pre-commit hook
python -m pre_commit install
# useful command
python -m pre_commit run --all-files
```
### pytest
```sh
# simple pytest
python -m pytest path/to/file
```
### tox
```sh
python -m pip install tox
python -m tox
# unit tests (change python target if needed)
tox -e py310
# pylint
tox -e pylint
# typecheck
tox -e typecheck
```
# http://stackoverflow.com/a/24727824
# https://www.youtube.com/watch?v=o-UbWsO9rZk
include LICENSE.txt
include enoslib/version.txt
recursive-include enoslib/ansible *
recursive-include enoslib/html *
@@ -2,7 +2,7 @@
EnOSlib: Surviving the ☆homoterogeneous☆ world
**************************************************
|Build Status| |License| |Pypi| |Pepy| |Chat|
|Code| |Doc| |Build Status| |License| |Pypi| |Pepy| |Chat| |SW|
What the ☆homoterogeneous☆ ?
@@ -23,47 +23,20 @@ basis.
In this context, EnOSlib smoothes the experimental code and can
- deal with various platforms (e.g. local machine, scientific testbed, virtualized environments)
- interact programmatically with different your remote resources: compute
- interact programmatically with different remote resources: compute
(servers, containers) and networks (ipv4, ipv6)
- deploy *ready-to-use* experimentation services (e.g. instrumentation, observability tools).
- emulate complex network topologies (e.g. for your FOG experiments)
- integrate your code with interactive development environments like Jupyter.
The software
------------
.. |Code| image:: https://img.shields.io/badge/code-gitlab-orange
:target: https://gitlab.inria.fr/discovery/enoslib
EnOSlib has been initially developed in the context of the `Discovery
<https://beyondtheclouds.github.io/>`_ initiative and is released under the
GPLv3 licence. It's a library written in Python: you are free to import it in
your code and cherry-pick any of its functions.
.. |Doc| image:: https://img.shields.io/badge/documentation-enoslib-blue
:target: https://discovery.gitlabpages.inria.fr/enoslib/index.html
.. hint::
You can install EnOSlib with pip:
.. code-block::
pip install enoslib
For developing EnOSlib or getting the *in development* version:
.. code-block::
git clone https://gitlab.inria.fr/discovery/enoslib.git
cd enoslib && pip install -U -e .
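Once installed, the typical workflow is to describe the resources you need, claim them from a provider, then act on the returned hosts. A minimal sketch with the Vagrant provider, mirroring the Vagrant example shown later in this diff (``en.run`` and ``provider.destroy`` are used as in those examples):

.. code-block:: python

    import logging

    import enoslib as en

    en.init_logging(level=logging.INFO)

    conf = (
        en.VagrantConf()
        .add_machine(roles=["control"], flavour="tiny", number=1)
        .finalize()
    )

    # claim the resources
    provider = en.Vagrant(conf)
    roles, networks = provider.init()

    # run a command on all hosts holding the "control" role
    en.run("hostname", roles["control"])

    # release the resources
    provider.destroy()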
Links
-----
- Documentation: https://discovery.gitlabpages.inria.fr/enoslib/
- Source: https://gitlab.inria.fr/discovery/enoslib
- Chat: https://framateam.org/enoslib
.. |Build Status| image:: https://gitlab.inria.fr/discovery/enoslib/badges/master/pipeline.svg
.. |Build Status| image:: https://gitlab.inria.fr/discovery/enoslib/badges/main/pipeline.svg
:target: https://gitlab.inria.fr/discovery/enoslib/pipelines
.. |License| image:: https://img.shields.io/badge/License-GPL%20v3-blue.svg
@@ -79,3 +52,5 @@ Links
.. |Chat| image:: https://img.shields.io/badge/mattermost-enoslib-blueviolet
:target: https://framateam.org/enoslib/channels/town-square
.. |SW| image:: https://archive.softwareheritage.org/badge/origin/https://gitlab.inria.fr/discovery/enoslib.git/
:target: https://archive.softwareheritage.org/browse/origin/?origin_url=https://gitlab.inria.fr/discovery/enoslib.git
@@ -17,4 +17,4 @@ help:
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
Conda & Dask Service
====================
Conda & Dask Service Class
--------------------------
.. automodule:: enoslib.service.conda.conda
:members: Conda, Dask, conda_run_command, conda_run
Docker Service
==============
Docker Service Class
--------------------
.. automodule:: enoslib.service.docker.docker
:members: Docker
@@ -6,25 +6,21 @@ This is an advanced example where
- network emulation will be enforced between those docker containers by
reusing |enoslib| api functions.
"""
import logging
from pathlib import Path
import enoslib as en
logging.basicConfig(level=logging.DEBUG)
en.init_logging(level=logging.INFO)
en.check()
job_name = Path(__file__).name
prod_network = en.G5kNetworkConf(
id="n1", type="prod", roles=["my_network"], site="rennes"
)
conf = (
en.G5kConf.from_settings(job_name=job_name, job_type="allow_classic_ssh")
.add_network_conf(prod_network)
.add_machine(
roles=["control"], cluster="paravance", nodes=5, primary_network=prod_network
)
en.G5kConf.from_settings(job_name=job_name, walltime="0:30:00", job_type=[])
.add_machine(roles=["control"], cluster="econome", nodes=2)
.finalize()
)
@@ -34,11 +30,14 @@ provider = en.G5k(conf)
roles, networks = provider.init()
# Install docker
d = en.Docker(agent=roles["control"], bind_var_docker="/tmp/docker")
registry_opts = dict(type="external", ip="docker-cache.grid5000.fr", port=80)
d = en.Docker(
agent=roles["control"], bind_var_docker="/tmp/docker", registry_opts=registry_opts
)
d.deploy()
# Start some containers
N = 25
# Start N containers on each G5K host (for a total of 2*N containers)
N = 4
with en.play_on(roles=roles) as p:
p.raw("modprobe ifb")
for i in range(N):
@@ -50,7 +49,7 @@ with en.play_on(roles=roles) as p:
capabilities=["NET_ADMIN"],
)
# Get all the dockers running on the remote hosts
# Get all the docker containers running on all remote hosts
dockers = en.get_dockers(roles=roles)
# Build the network constraints to apply on the remote docker
@@ -58,19 +57,16 @@ dockers = en.get_dockers(roles=roles)
sources = []
for idx, host in enumerate(dockers):
delay = idx
print(f"{host.alias} <-> {delay}")
print(f"{host.alias} <-> {delay}ms")
inbound = en.NetemOutConstraint(device="eth0", options=f"delay {delay}ms")
outbound = en.NetemInConstraint(device="eth0", options=f"delay {delay}ms")
sources.append(en.NetemInOutSource(host, constraints=[inbound, outbound]))
sources.append(en.NetemInOutSource(host, constraints={inbound, outbound}))
# The connection plugin used from here is docker protocol (not ssh). The
# Ansible implementation to support this protocol isn't as robust as SSH. For
# instance there's no automatic retries. Fortunately for such lack in the
# Ansible connection backend, enoslib provides an ``ansible_retries`` parameter
# that will keep retrying the whole set of tasks on the failed hosts until all
# hosts have succeeded.
with en.play_on(roles=dict(all=dockers), gather_facts=False, ansible_retries=5) as p:
p.raw("apt update && apt install -y iproute2")
# This requires the Docker client to be installed on the local machine.
# Also, it might not work well because SSH connections are handled by Docker.
# See https://gitlab.inria.fr/discovery/enoslib/-/issues/163 for discussion
with en.play_on(roles=dockers, gather_facts=False) as p:
# We can't use the 'apt' module because python is not installed in containers
p.raw("apt update && DEBIAN_FRONTEND=noninteractive apt install -qq -y iproute2")
en.netem(sources, ansible_retries=5)
en.netem(sources)
%% Cell type:markdown id: tags:
# Dask deployment on Grid'5000
This notebook will deploy a Dask cluster on Grid'5000 and launch a simple computation.
Requirements:
- A conda[[1]] environment set up on the Grid'5000 frontend, with Dask and EnOSlib installed.
- The same environment can be used to run this notebook from your local machine.

[1]: https://docs.conda.io/en/latest/miniconda.html#linux-installers
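For reference, one way to set up such an environment on the frontend — a sketch only: the installer URL and package list are assumptions, and the environment name `dask-base` matches the deployment cell below:

```
# download and install Miniconda in the home directory (assumed installer URL)
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3
source $HOME/miniconda3/etc/profile.d/conda.sh
# create the environment with dask, then add enoslib from PyPI
conda create -y -n dask-base python dask
conda activate dask-base
pip install enoslib
```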
## Initial imports
%% Cell type:code id: tags:
```
from enoslib import *
import logging
# get some logs
logging.basicConfig(level=logging.INFO)
```
%% Cell type:markdown id: tags:
## Get some resources on Grid'5000
This will reserve two nodes, where the Dask cluster will be deployed later.
%% Cell type:code id: tags:
```
prod = G5kNetworkConf(id="prod", roles=["network"], type="prod", site="rennes")
conf = (
G5kConf.from_settings(job_name="dask", job_type="allow_classic_ssh")
G5kConf.from_settings(job_name="dask", job_type=[])
.add_machine(roles=["scheduler"], cluster="parapide", nodes=1, primary_network=prod)
.add_machine(roles=["worker"], cluster="parapide", nodes=1, primary_network=prod)
.add_network_conf(prod)
).finalize()
provider = G5k(conf)
roles, _ = provider.init()
```
%% Output
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from grenoble
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from lille
{'roles': ['scheduler'], 'primary_network': 'prod', 'secondary_networks': [], 'cluster': 'parapide', 'nodes': 1}
{'roles': ['worker'], 'primary_network': 'prod', 'secondary_networks': [], 'cluster': 'parapide', 'nodes': 1}
{
"dhcp": true,
"force_deploy": false,
"env_name": "debian10-x64-nfs",
"job_name": "dask",
"job_type": "allow_classic_ssh",
"key": "/home/msimonin/.ssh/id_rsa.pub",
"queue": "default",
"walltime": "02:00:00",
"resources": {
"machines": [
{
"roles": [
"scheduler"
],
"primary_network": "prod",
"secondary_networks": [],
"cluster": "parapide",
"nodes": 1
},
{
"roles": [
"worker"
],
"primary_network": "prod",
"secondary_networks": [],
"cluster": "parapide",
"nodes": 1
}
],
"networks": [
{
"id": "prod",
"type": "prod",
"roles": [
"network"
],
"site": "rennes"
}
]
}
}
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from luxembourg
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from lyon
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from nancy
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from nantes
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from rennes
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading 1425746 from rennes
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Reloading dask from sophia
INFO:enoslib.infra.enos_g5k.g5k_api_utils:Waiting for 1425746 on rennes [2021-01-12 13:37:51]
INFO:enoslib.infra.enos_g5k.g5k_api_utils:All jobs are Running !
%% Cell type:markdown id: tags:
## Deploy Dask on the nodes
This assumes that the conda environment (dask-base) is configured in your home directory in `/home/<user>/miniconda3`.
If the installation path differs, you can specify it using the `conda_prefix` parameter.
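For example, with a non-default install location (the path below is purely illustrative):

```
dask = Dask("dask-base", scheduler=roles["scheduler"][0], workers=roles["worker"],
            run_as=username, conda_prefix="/opt/miniconda3")
```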
%% Cell type:code id: tags:
```
username = g5k_api_utils.get_api_username()
dask = Dask("dask-base", scheduler=roles["scheduler"][0], workers=roles["worker"], run_as=username)
dask.deploy()
```
%% Output
INFO:enoslib.api:Running playbook /home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpc775senv with vars:
{}
source /home/msimonin/miniconda3/etc/profile.d/conda.sh && conda activate andromak && dask-scheduler
PLAY [scheduler] ***************************************************************
TASK [(tmux ls | grep dask-scheduler )|| tmux new-session -s dask-scheduler -d 'source /home/msimonin/miniconda3/etc/profile.d/conda.sh && conda activate andromak && dask-scheduler'] ***
Tuesday 12 January 2021 14:14:40 +0100 (0:13:33.402) 0:28:23.917 *******
changed: [parapide-12.rennes.grid5000.fr]
TASK [__calling__ wait_for] ****************************************************
Tuesday 12 January 2021 14:14:42 +0100 (0:00:01.219) 0:28:25.136 *******
ok: [parapide-12.rennes.grid5000.fr]
PLAY RECAP *********************************************************************
parapide-12.rennes.grid5000.fr : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Tuesday 12 January 2021 14:14:42 +0100 (0:00:00.436) 0:28:25.573 *******
===============================================================================
(tmux ls | grep dask-scheduler )|| tmux new-session -s dask-scheduler -d 'source /home/msimonin/miniconda3/etc/profile.d/conda.sh && conda activate andromak && dask-scheduler' --- 1.22s
__calling__ wait_for ---------------------------------------------------- 0.44s
INFO:enoslib.api:Running playbook /home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmp8e30zh9l with vars:
{}
{'code': 0, 'result': [{'parapide-12.rennes.grid5000.fr': {'ok': 2, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0, 'rescued': 0, 'ignored': 0}}], 'playbook': '/home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpc775senv'}
PLAY [worker] ******************************************************************
TASK [(tmux ls | grep dask-worker )|| tmux new-session -s dask-worker -d 'source /home/msimonin/miniconda3/etc/profile.d/conda.sh && conda activate andromak && dask-worker tcp://parapide-12.rennes.grid5000.fr:8786 '] ***
Tuesday 12 January 2021 14:14:42 +0100 (0:00:00.082) 0:28:25.656 *******
changed: [parapide-16.rennes.grid5000.fr]
PLAY RECAP *********************************************************************
parapide-16.rennes.grid5000.fr : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Tuesday 12 January 2021 14:14:43 +0100 (0:00:01.167) 0:28:26.823 *******
===============================================================================
(tmux ls | grep dask-worker )|| tmux new-session -s dask-worker -d 'source /home/msimonin/miniconda3/etc/profile.d/conda.sh && conda activate andromak && dask-worker tcp://parapide-12.rennes.grid5000.fr:8786 ' --- 1.17s
{'code': 0, 'result': [{'parapide-16.rennes.grid5000.fr': {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0, 'rescued': 0, 'ignored': 0}}], 'playbook': '/home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmp8e30zh9l'}
%% Cell type:markdown id: tags:
## Using Dask
Here we go with a simple computation (3 tasks, the last one depending on the other two).
The below code will create all the tunnels needed to access the Dask dashboard and the scheduler.
%% Cell type:code id: tags:
```
from dask import delayed
import time
def inc(x):
time.sleep(5)
return x + 1
def dec(x):
time.sleep(3)
return x - 1
def add(x, y):
time.sleep(7)
return x + y
x = delayed(inc)(1)
y = delayed(dec)(2)
total = delayed(add)(x, y)
```
%% Cell type:markdown id: tags:
## Launch the computation
In the meantime you can check the web dashboard; the connection URL will be displayed.
%% Cell type:code id: tags:
```
from dask.distributed import Client
# Tunnel to the dashboard
addr, port, tunnel = G5kTunnel(dask.scheduler.address, 8787).start()
print(f"dashboard: http://{addr}:{port}")
with G5kTunnel(dask.scheduler.address, 8786) as (addr, port, _):
print(f"Scheduler address: {addr}:{port}")
client = Client(f"tcp://{addr}:{port}")
# launch a computation
print(f"result={total.compute()}")
```
%% Output
INFO:paramiko.transport:Connected (version 2.0, client OpenSSH_7.4p1)
INFO:paramiko.transport:Authentication (publickey) successful!
INFO:paramiko.transport:Connected (version 2.0, client OpenSSH_7.4p1)
dashboard: http://0.0.0.0:38383
INFO:paramiko.transport:Authentication (publickey) successful!
Scheduler address: 0.0.0.0:35945
result=3
%% Cell type:code id: tags:
```
# will stop the tunnel to the dashboard and the Dask cluster.
if tunnel is not None:
tunnel.stop(force=True)
dask.destroy()
```
%% Output
INFO:enoslib.api:Running playbook /home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpt2mbeu_y with vars:
{}
PLAY [scheduler] ***************************************************************
TASK [Killing the dask scheduler] **********************************************
Tuesday 12 January 2021 14:14:57 +0100 (0:00:13.913) 0:28:40.736 *******
changed: [parapide-12.rennes.grid5000.fr]
PLAY RECAP *********************************************************************
parapide-12.rennes.grid5000.fr : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Tuesday 12 January 2021 14:14:57 +0100 (0:00:00.203) 0:28:40.940 *******
===============================================================================
Killing the dask scheduler ---------------------------------------------- 0.21s
INFO:enoslib.api:Running playbook /home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpr0joxpnq with vars:
{}
{'code': 0, 'result': [{'parapide-12.rennes.grid5000.fr': {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0, 'rescued': 0, 'ignored': 0}}], 'playbook': '/home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpt2mbeu_y'}
PLAY [worker] ******************************************************************
TASK [Killing the dask worker] *************************************************
Tuesday 12 January 2021 14:14:57 +0100 (0:00:00.074) 0:28:41.014 *******
changed: [parapide-16.rennes.grid5000.fr]
PLAY RECAP *********************************************************************
parapide-16.rennes.grid5000.fr : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Tuesday 12 January 2021 14:14:58 +0100 (0:00:00.202) 0:28:41.217 *******
===============================================================================
Killing the dask worker ------------------------------------------------ 0.21s
{'code': 0, 'result': [{'parapide-16.rennes.grid5000.fr': {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0, 'rescued': 0, 'ignored': 0}}], 'playbook': '/home/msimonin/workspace/repos/enoslib/docs/apidoc/examples/tmpr0joxpnq'}
%% Cell type:code id: tags:
```
```
import logging
from enoslib import *
import enoslib as en
logging.basicConfig(level=logging.INFO)
en.init_logging(level=logging.INFO)
en.check()
conf = VagrantConf()\
.add_machine(roles=["control"],
flavour="tiny",
number=1)\
.add_machine(roles=["compute"],
flavour="tiny",
number=1)\
.add_network(roles=["mynetwork"],
cidr="192.168.42.0/24")\
.finalize()
conf = (
en.VagrantConf()
.add_machine(roles=["control"], flavour="tiny", number=1)
.add_machine(roles=["compute"], flavour="tiny", number=1)
.add_network(roles=["mynetwork"], cidr="192.168.42.0/24")
.finalize()
)
# claim the resources
provider = Vagrant(conf)
provider = en.Vagrant(conf)
roles, networks = provider.init()
# generate an inventory compatible with ansible
roles = sync_info(roles, networks)
roles = en.sync_info(roles, networks)
docker = Docker(registry=roles["control"], agent=roles["compute"])
docker = en.Docker(registry=roles["control"], agent=roles["compute"])
docker.deploy()
docker.backup()
docker.destroy()
../../tutorials/grid5000/tuto_grid5000_docker.py
\ No newline at end of file
import logging
from pathlib import Path
import time
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pathlib import Path
import enoslib as en
logging.basicConfig(level=logging.DEBUG)
en.init_logging(level=logging.INFO)
en.check()
CLUSTER = "parasilo"
SITE = en.g5k_api_utils.get_cluster_site(CLUSTER)
job_name = Path(__file__).name
# claim the resources
conf = en.G5kConf.from_settings(job_type="allow_classic_ssh",
job_name="test-non-deploy")
network = en.G5kNetworkConf(id="n1",
type="prod",
roles=["my_network"],
site=SITE)
conf.add_network_conf(network)\
.add_machine(roles=["control"],
cluster=CLUSTER,
nodes=2,
primary_network=network)\
network = en.G5kNetworkConf(type="prod", roles=["my_network"], site=SITE)
conf = (
en.G5kConf.from_settings(job_name=job_name, walltime="0:30:00", job_type=[])
.add_network_conf(network)
.add_machine(roles=["control"], cluster=CLUSTER, nodes=2, primary_network=network)
.finalize()
)
provider = en.G5k(conf)
roles, networks = provider.init()
@@ -38,22 +29,11 @@ with en.actions(roles=roles["control"]) as a:
# Start a capture
# - for the duration of the commands
with en.Dstat(nodes=roles["control"]) as d:
with en.Dstat(nodes=roles) as d:
time.sleep(5)
en.run("stress --cpu 4 --timeout 10", roles["control"])
en.run("stress --cpu 4 --timeout 10", roles)
time.sleep(5)
backup_dir = d.backup_dir
# Create a dictionnary of (alias) -> list of pandas df
result = pd.DataFrame()
for host in roles["control"]:
host_dir = backup_dir / host.alias
csvs = host_dir.rglob("*.csv")
for csv in csvs:
df = pd.read_csv(csv, skiprows=5, index_col=False)
df["host"] = host.alias
df["csv"] = csv
result = pd.concat([result, df], axis=0)
sns.lineplot(data=result, x="epoch", y="usr", hue="host", markers=True, style="host")
plt.show()
\ No newline at end of file
# sns.lineplot(data=result, x="epoch", y="usr", hue="host", markers=True, style="host")
# plt.show()
from pathlib import Path
import time
from locust import User, task, between, events
from locust import User, between, events, task
class QuickstartUser(User):
@@ -10,10 +10,12 @@ class QuickstartUser(User):
def sleep1(self):
# faking a 1 second request
time.sleep(1)
events.request.fire(request_type="noopclient",
name="sleep1",
response_time=1,
response_length=0,
response=None,
context=None,
exception=None)
\ No newline at end of file
events.request.fire(
request_type="noopclient",
name="sleep1",
response_time=1,
response_length=0,
response=None,
context=None,
exception=None,
)
import logging
from pathlib import Path
from enoslib import *
import enoslib as en
en.init_logging(level=logging.INFO)
en.check()
logging.basicConfig(level=logging.INFO)
job_name = Path(__file__).name
# claim the resources
network = G5kNetworkConf(id="n1", type="prod", roles=["my_network"], site="rennes")
conf = (
G5kConf.from_settings(job_type="allow_classic_ssh", job_name="k3s")
.add_network_conf(network)
.add_machine(
roles=["master"], cluster="paravance", nodes=1, primary_network=network
)
.add_machine(
roles=["agent"], cluster="parapluie", nodes=10, primary_network=network
)
en.G5kConf.from_settings(job_name=job_name, walltime="0:45:00", job_type=[])
.add_machine(roles=["master"], cluster="paradoxe", nodes=1)
.add_machine(roles=["agent"], cluster="paradoxe", nodes=10)
.finalize()
)
provider = G5k(conf)
provider = en.G5k(conf)
# Get actual resources
roles, networks = provider.init()
k3s = K3s(master=roles["master"], agent=roles["agent"])
k3s = en.K3s(master=roles["master"], agent=roles["agent"])
k3s.deploy()
@@ -2,35 +2,41 @@ import logging
import enoslib as en
logging.basicConfig(level=logging.INFO)
en.init_logging(level=logging.INFO)
en.check()
provider_conf = {
"backend": "libvirt",
"resources": {
"machines": [{
"roles": ["master"],
"flavour": "tiny",
"number": 1,
}, {
"roles": ["agent"],
"flavour": "tiny",
"number": 1,
}],
"networks": [{"roles": ["r1"], "cidr": "172.16.42.0/16"}]
}
"machines": [
{
"roles": ["master"],
"flavour": "tiny",
"number": 1,
},
{
"roles": ["agent"],
"flavour": "tiny",
"number": 1,
},
],
"networks": [{"roles": ["r1"], "cidr": "172.16.42.0/16"}],
},
}
conf = en.VagrantConf.from_dictionnary(provider_conf)
conf = en.VagrantConf.from_dictionary(provider_conf)
provider = en.Vagrant(conf)
roles, networks = provider.init()
roles = en.sync_info(roles, networks)
locust = en.Locust(master=roles["master"][0],
workers=roles["agent"],
networks=networks["r1"],
local_expe_dir="expe",
run_time=100)
locust = en.Locust(
master=roles["master"][0],
workers=roles["agent"],
networks=networks["r1"],
local_expe_dir="expe",
run_time=100,
)
locust.deploy()
locust.backup()
\ No newline at end of file
locust.backup()