diff --git a/app/tuto3-ai4t/index.php b/app/tuto3-ai4t/index.php new file mode 100644 index 0000000000000000000000000000000000000000..738c1397e1b8cba722a254f94f3f31c0916a02f1 --- /dev/null +++ b/app/tuto3-ai4t/index.php @@ -0,0 +1,77 @@ +<?php + +$currentURL = $_SERVER['REQUEST_URI']; +$tutoURL = substr($currentURL, 0, strpos($currentURL, 'app/') - 1); +$lang = $_GET['lang'] ?: 'fr'; + +?> + +<!doctype html> +<html> +<head> +<meta charset='UTF-8'> +<meta name="viewport" content="width=device-width, initial-scale=1.0"> +<title>Class'Code IAI : Humains et IA !</title> +<link href='<?php echo $tutoURL; ?>/assets/css/tutos-ia.css' rel='stylesheet' type='text/css' /> +<link href='<?php echo $tutoURL; ?>/assets/css/tuto3-1.css' rel='stylesheet' type='text/css' /> + <link href='<?php echo $tutoURL; ?>/assets/css/tuto3-devices.css' rel='stylesheet' type='text/css' /> +<link href="https://fonts.googleapis.com/css?family=Raleway:400,700,800|Rambla:400,700&display=swap" rel="stylesheet" /> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/jquery-3.1.1.min.js'></script> + <script type='text/javascript' src='https://pixees.fr/wp-content/plugins/google-analyticator/external-tracking.min.js?ver=6.5.4'></script> + +</head> +<body > + + <div class='tuto-ia-application' data-baseURL="<?php echo $tutoURL; ?>" data-lang="<?php echo $lang; ?>"> + <div id='step-header' class='step-header'></div> + <div id='step-contents' class='step-contents'></div> + <div id='step-footer' class='step-footer'></div> + <div class='popup hidden'></div> + </div> + + <audio id="audioPlayer1" src=""></audio> + + <!-- templates ------> + <script id='dataset-template' type='text/x-handlebars-template'> + <ul data-datasetname='{{datasetname}}'> + {{#each files}} + <li class='dataset-image' style='background-image:url({{../datasetpath}}/{{path}})' > + <img + src='{{../datasetpath}}/{{path}}' + class='hidden-dataset-image' + data-path='{{path}}' + + {{#if datasetname}} + data-datasetname='{{datasetname}}' + {{else}} + data-datasetname='{{../datasetname}}' + {{/if}} + /> + </li> + {{/each}} + </ul> + </script> + <div class="message-device" > + <p>Cette partie n'est pas adaptée aux appareils mobiles.</p> + <p>Veuillez la consulter dans Chrome sur un ordinateur.</p> + </div> + + <!-- scripts ------> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/encoding-indexes.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/encoding.js'></script> + <script type='text/javascript' src='https://unpkg.com/ml5@0.4.2/dist/ml5.min.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/deparam.min.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/jquery.router.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/handlebars-v4.4.3.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/handlebars_utils.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/i18n.min.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/libs/tutos_utils.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/tuto3-devices.js'></script> + <script type='text/javascript' src='<?php echo $tutoURL; ?>/assets/js/tuto3-ai4t.js'></script> + <?php + if ( strpos( $_SERVER['HTTP_HOST'], 'pixees.fr') !== FALSE ) { + include '../shared/analytics.php'; 
+ } + ?> +</body> +</html> diff --git a/assets/images/accueil_tuto1_avec_titre-en.png b/assets/images/accueil_tuto1_avec_titre-en.png new file mode 100644 index 0000000000000000000000000000000000000000..f85b06e246cf24b6b9447ee85b07e259c030f2f0 Binary files /dev/null and b/assets/images/accueil_tuto1_avec_titre-en.png differ diff --git a/assets/images/accueil_tuto2_avec_titre-en.png b/assets/images/accueil_tuto2_avec_titre-en.png new file mode 100644 index 0000000000000000000000000000000000000000..c7f24969bc335d433bec87d0d0e36477c8da608f Binary files /dev/null and b/assets/images/accueil_tuto2_avec_titre-en.png differ diff --git a/assets/js/tuto1.js b/assets/js/tuto1.js index b590a3b48b73ebbd98b1ff4b520a0120cc31f113..b79b5ccd7157ae87929fc747ca4876491c1c5e2a 100644 --- a/assets/js/tuto1.js +++ b/assets/js/tuto1.js @@ -30,6 +30,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -101,7 +103,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid1.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid1" + langExt + ".vtt"; break; case '/tester/etape2': @@ -114,7 +116,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid2.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid2" + langExt + ".vtt"; break; case '/experimenter/etape2': @@ -143,25 +145,25 @@ case '/experimenter/etape6': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid3.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid3" + langExt + ".vtt"; break; case '/experimenter/etape8': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid4.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid4.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid4" + langExt + ".vtt"; break; case '/experimenter/etape10': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid5.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid5.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid5" + langExt + ".vtt"; break; case '/creer/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid6.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid6.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid6" + langExt + ".vtt"; break; case '/creer/etape2': @@ -185,7 +187,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = 
"https://files.inria.fr/mecsci/classcodeIAI/tuto1/mp4/tuto1-activite1-vid7.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid7.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto1-activite1-vid7" + langExt + ".vtt"; break; case '/conclure/etape2': diff --git a/assets/js/tuto2.js b/assets/js/tuto2.js index 32721568fb55f97e9a9c1a1964e2051be1d619a0..3b7e51cae0c16605b73aca9838bfaa4e25a000d6 100644 --- a/assets/js/tuto2.js +++ b/assets/js/tuto2.js @@ -30,6 +30,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -101,7 +103,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid1.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid1" + langExt + ".vtt"; break; case '/tester/etape2': @@ -112,7 +114,7 @@ case '/tester/etape3': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid2.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid2" + langExt + ".vtt"; break; case '/experimenter/etape1': @@ -120,7 +122,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid3.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid3" + langExt + ".vtt"; // Suppression des catégories et sélections d'images précédemment définies clearRouteData('/experimenter/etape2' ); @@ -152,7 +154,7 @@ case '/experimenter/etape6': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid4.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid4.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid4" + langExt + ".vtt"; break; case '/experimenter/etape8': @@ -165,13 +167,13 @@ case '/experimenter/etape10': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid5.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid5.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid5" + langExt + ".vtt"; break; case '/creer/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid6.mp4"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid6.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid6" + langExt + ".vtt"; // Suppression des catégories et sélections d'images précédemment définies clearRouteData('/creer/etape2' ); @@ -200,7 +202,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "https://files.inria.fr/mecsci/classcodeIAI/tuto2/mp4/tuto2-activite1-vid7.mp4"; - templateParams.subtitlePath = tutorialFolder + 
"/vtt/tuto2-activite1-vid7.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto2-activite1-vid7" + langExt + ".vtt"; break; case '/credits' : diff --git a/assets/js/tuto3-1.js b/assets/js/tuto3-1.js index f22b364cf896c919fde6bdb818b08f1b9e54baea..dd4e0d1cbdeb47f0bb940902130f5eb03a986c2d 100644 --- a/assets/js/tuto3-1.js +++ b/assets/js/tuto3-1.js @@ -27,6 +27,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -80,7 +82,7 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite1-vid1.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid1" + langExt + ".vtt"; break; case '/tester': case '/tester/etape1': @@ -200,14 +202,14 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite1-vid2.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid2" + langExt + ".vtt"; break; case '/conclure/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite1-vid3.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite1-vid3" + langExt + ".vtt"; break; diff --git a/assets/js/tuto3-2.js b/assets/js/tuto3-2.js index 441b81301bbc24a5a5a99d1f96cd2c138559b014..c3636e114ceab75ba507ad219681c77e45be2ed9 100644 --- a/assets/js/tuto3-2.js +++ b/assets/js/tuto3-2.js @@ -27,6 +27,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? 
'':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -72,20 +74,20 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite2-vid1.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-minus.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid1" + langExt + ".vtt"; break; case '/comprendre/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite2-vid2.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid2" + langExt + ".vtt"; break; case '/conclure/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite2-vid3.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite2-vid3" + langExt + ".vtt"; break; diff --git a/assets/js/tuto3-3.js b/assets/js/tuto3-3.js index 004580352a404e4bfaf00682a823553e94117f6a..72f352ba1cff4da6d6ce9cbec26f92a5070fa53f 100644 --- a/assets/js/tuto3-3.js +++ b/assets/js/tuto3-3.js @@ -27,6 +27,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -82,19 +84,19 @@ templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite3-vid1.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg";//tutorialFolder + "/medias/vid1.jpg"; //!!!!!!!!!!!!!!!!!!!!!!!!!!! à remettre - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid1" + langExt + ".vtt"; break; case '/comprendre/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite3-vid2.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg";//tutorialFolder + "/medias/vid1.jpg"; //!!!!!!!!!!!!!!!!!!!!!!!!!!! à remettre - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid2" + langExt + ".vtt"; break; case '/conclure/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite3-vid3.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg";//tutorialFolder + "/medias/vid1.jpg"; //!!!!!!!!!!!!!!!!!!!!!!!!!!! 
à remettre - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite3-vid3" + langExt + ".vtt"; break; } diff --git a/assets/js/tuto3-4.js b/assets/js/tuto3-4.js index 3f895db0e4d7c6db4939b2b905371ec23932145d..b8cfdfb7c56484581f10c96d30afaf73cdf1f3ca 100644 --- a/assets/js/tuto3-4.js +++ b/assets/js/tuto3-4.js @@ -27,6 +27,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -72,20 +74,20 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite4-vid1.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid1.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid1" + langExt + ".vtt"; break; case '/comprendre/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite4-vid2.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid2.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid2" + langExt + ".vtt"; break; case '/conclure/etape1': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-activite4-vid3.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid3.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-activite4-vid3" + langExt + ".vtt"; break; diff --git a/assets/js/tuto3-ai4t.js b/assets/js/tuto3-ai4t.js new file mode 100644 index 0000000000000000000000000000000000000000..6aa30ff78b087bdba2ea0b0380d7cf8db693df5c --- /dev/null +++ b/assets/js/tuto3-ai4t.js @@ -0,0 +1,637 @@ +(function($){ + + // http://127.0.0.1:8080/edsa-websites/classcodeia/app/tuto3/experimenter/etape2 + // http://127.0.0.1:8080/edsa-websites/inria-tutos/app/test_images/ + + // https://projects.invisionapp.com/share/ZMULMNT879R#/screens/394634306 + // https://docs.google.com/presentation/d/1yz2ib1BCuWb9aY3pAX0cWOII6e2S1UtkCkWzAg7xdpk/edit#slide=id.g70a7ca2b8c_0_1296 + + // http://talkerscode.com/webtricks/preview-image-before-upload-using-javascript.php + + + // Support pour le navigateur Microsoft Edge, version non-Chromium ( Microsoft Edge 44.18362.449.0 ): + // 1. Un polyfill a été également ajouté pour le support de TextEncoding : https://github.com/inexorabletash/text-encoding + // 2. 
Desactivation du passage par la carte graphique via WebGL + if (document.documentMode || /Edge/.test(navigator.userAgent)) { + _tfengine.ENV.set('WEBGL_PACK', false); + } + + // Elements + let $application = $('.tuto-ia-application'); + let $header = $('#step-header'); + let $content = $('#step-contents'); + let $footer = $('#step-footer'); + + // BaseURL : écrit dans le index.php via PHP + let baseURL = $application.attr('data-baseURL'); + + // Lang : ecrit dans le index.php via PHP + let lang = $application.attr('data-lang'); + + // Chemins + let dataFolder = baseURL + "/data"; + let assetsFolder = baseURL + "/assets"; + + // Tutoriel 1 : + let appBaseRoute = baseURL + "/app/tuto3-ai4t"; + let tutorialFolder = dataFolder + "/tuto3-1"; + + // Templates HandleBars : + Handlebars.templateFolder = tutorialFolder + '/templates/'; + Handlebars.templates = []; + + // Mise en cache des templates JSON des différents écrans pour ne pas les charger plusieurs fois + let _tutorialJson; + + let currentAbsoluteRoute; + let currentRelativeRoute; + let memoRelativeRoute=""; + + // QCM : + let qcm_tabImages=[]; + let qcm_index; + let qcm_nextTimer=1500; + let storeAIImages=""; + let qcm_steps=[-1,-1,-1,-1,-1]; + + let storeAISteps=""; + + + // -------------------------------------------------------------------------------------------------------- // + // FONCTIONS LIEES AU TUTORIEL + // -------------------------------------------------------------------------------------------------------- // + + // + // Evènements + // + + // RQ : Les évènements souris sont détectés à la racine du DOM de l'application + + // Avant le chargement de la template : + function beforeLoadingTemplateJsonForRoute( route ) { + + let templateParams = {}; + + route = route.split(appBaseRoute).join(''); + console.log("....................."+route); + switch(route) { + + case '/': + // Chemin de la vidéo à insérer dans la template : + templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3-ai4t/Tuto3-video1-EN.mp4"; + break; + case '/tester': + case '/tester/etape1': + qcm_tabImages=[]; + for(var i=1;i<12;i++){ + qcm_tabImages[qcm_tabImages.length]="GAN1/Person-Gan-"+i+".jpg"; + } + for(var i=1;i<11;i++){ + qcm_tabImages[qcm_tabImages.length]="GAN2/true"+i+".jpg"; + } + //-- vérification qu'il y a bien au moins 1 vrai et un faux + let okshuffle=0; + while (okshuffle==0){ + qcm_tabImages=shuffleAffay(qcm_tabImages); + nbGan1=0; + nbGan2=0; + for(i=0;i<4;i++){ + if(qcm_tabImages[i].substr(0,4)=="GAN1") { + nbGan1+=1; + }else{ + nbGan2+=1; + } + } + if((nbGan1>0)&&(nbGan2>0)) { + okshuffle=1; + }else{ + console.log("!!!!!!!!!!! 
reshuffle !!!!!!!!!!!!!!"); + } + } + + //console.log(qcm_tabImages); + //console.log(qcm_tabImages.toString()); + storeData(qcm_tabImages.toString()); + qcm_index=0; + qcm_steps=[-1,-1,-1,-1,-1]; + storeSteps(qcm_steps.toString()); + + currentChapter='tester'; + + break; + + case '/tester/etape2': + qcm_index=1; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='tester'; + break; + case '/tester/etape3': + qcm_index=2; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='tester'; + break; + case '/tester/etape4': + qcm_index=3; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='tester'; + break; + case '/tester/etape5': + qcm_index=4; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='tester'; + break; + + case '/verifier': + case '/verifier/etape1': + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + + if(qcm_tabImages.length==0) { + for(var i=1;i<12;i++){ + qcm_tabImages[qcm_tabImages.length]="GAN1/Person-Gan-"+i+".jpg"; + } + for(var i=1;i<11;i++){ + qcm_tabImages[qcm_tabImages.length]="GAN2/true"+i+".jpg"; + } + qcm_tabImages=shuffleAffay(qcm_tabImages); + }else{ + } + + qcm_index=0; + qcm_steps=[-1,-1,-1,-1,-1]; + storeSteps(qcm_steps.toString()); + currentChapter='verifier'; + + break; + + + case '/verifier/etape2': + qcm_index=1; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='verifier'; + break; + case '/verifier/etape3': + qcm_index=2; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='verifier'; + break; + case '/verifier/etape4': + qcm_index=3; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='verifier'; + break; + case '/verifier/etape5': + qcm_index=4; + storeAIImages=restoreAIImages(); + qcm_tabImages=storeAIImages.split(','); + currentChapter='verifier'; + break; + + case '/comprendre/etape1': + // Chemin de la vidéo à insérer dans la template : + templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3-ai4t/Tuto3-video2-EN.mp4"; + + break; + case '/conclure/etape1': + // Chemin de la vidéo à insérer dans la template : + templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3-ai4t/Tuto3-video3-EN.mp4"; + + break; + + } + + //-- dans tous les cas + $('qcm-button').css('background-color','#5937B9'); + + return templateParams; + } + + // Une fois la template chargée et insérée dans le DOM : + function afterInsertingTemplateForRoute( route ) { + + route = route.split(appBaseRoute).join(''); + + console.log('Templated Loaded', route); + + console.log(memoRelativeRoute) + + let dataCompleteForCurrentStep; + let infosCategory1, infosCategory2; + + switch(route) { + case '/': + case '/comprendre/etape1': + case '/conclure/etape1': + + switch(memoRelativeRoute){ + case "/comprendre": + case "/conclure": + case "/": + break; + default: + break; + + } + + + break; + case '/tester/etape1': + case '/verifier/etape1': + + qcm_proposal(); + + break; + + case '/tester/etape2': + case '/tester/etape3': + case '/tester/etape4': + case '/tester/etape5': + case '/verifier/etape2': + case '/verifier/etape3': + case '/verifier/etape4': + case '/verifier/etape5': + + qcm_proposal(); + break; + case '/conclure/etape2': + break; + + default: + break; + + } + //-- raz for all -------------- + $(".zoom-div").css('display','none'); 
+ + } + + // Si l'utilisateur clique sur un élement d'interface + $application.on('click', function(e) { + + let target = $(e.target); + let message, $li; + + // Liens basiques (credits) + if (target.hasClass("basic-link")) + { + return; + } + + e.preventDefault(); + + if (target.hasClass("forced")) + { + + let navigationRoute = target.data('route'); + + document.location=navigationRoute; + } + else if (target.hasClass("navigation-link")) + { + let navigationLi = target.closest('.navigation-route'); + let navigationRoute = navigationLi.data('route'); + + $.router.set({ + route: appBaseRoute + navigationRoute + }); + } + else if (target.hasClass("video-parent")) + { + let videoElement = target.find('video'); + playVideo(videoElement); + } + else if ( target.hasClass("video-background") || target.hasClass("video-infos")) { + + let videoParent = target.closest('.video-parent'); + let videoElement = videoParent.find('video'); + playVideo(videoElement); + + } + else if (target.hasClass("outer-link")) + { + window.open(target.attr("href")); + } + else if (target.hasClass("title-link")) + { + document.location=baseURL+"/app/tuto3/"; + } + else if (target.hasClass("qcm-button")) + { + // tester si bonne réponse + let trueOrAi=qcm_tabImages[qcm_index].substr(0,4); + + if(currentChapter=='tester') { + trueOrAi=qcm_tabImages[qcm_index].substr(0,4); + }else{ + trueOrAi=qcm_tabImages[qcm_index+5].substr(0,4); + } + let goodResponse=false; + if (target.hasClass("human")) { + if(trueOrAi=="GAN2") { + goodResponse=true; + }else{ + } + }else{ + if(trueOrAi=="GAN1") { + goodResponse=true; + }else{ + } + } + let audioPlayer = document.getElementById('audioPlayer1'); + let audioFile; + if(goodResponse==true) { + target.css('background-color','#00FF00'); + audioFile=tutorialFolder + '/medias/sounds/good.mp3'; + qcm_steps[qcm_index]=1; + }else{ + target.css('background-color','#FF0000'); + audioFile=tutorialFolder + '/medias/sounds/bad.mp3'; + qcm_steps[qcm_index]=0; + } + + storeSteps(qcm_steps.toString()); + showStepResponses(); + //console.log(audioFile); + audioPlayer.src = audioFile; + audioPlayer.play(); + + var max_index=4; + + if(qcm_index<max_index) { + qcm_index+=1; + setTimeout(go_qcm,qcm_nextTimer); + }else{ + if(currentChapter=='tester') { + setTimeout(goRoute,qcm_nextTimer,'/comprendre/etape1'); + }else{ + setTimeout(goRoute,qcm_nextTimer,'/conclure/etape1'); + } + + } + + + } + else if (target.hasClass("qcm-zoom")) + { + $(".zoom-div").css('display','block'); + } + else if (target.hasClass("zoom-div")) + { + $(".zoom-div").css('display','none'); + } + else if (target.hasClass("credits-click")) + { + console.log("credits"); + goRoute('/'); + } + + }); + + + + + // -------------------------------------------------------------------------------------------------------- // + // FONCTIONS GENERALES + // -------------------------------------------------------------------------------------------------------- // + + // + // Router + // https://github.com/scssyworks/silkrouter/tree/feature/ver2 + // + + $.route(function (data) { + + currentAbsoluteRoute = data.route; + + let relativeRoute = currentAbsoluteRoute.split(appBaseRoute).join(''); + + console.log("Route absolue", currentAbsoluteRoute, "Route relative", relativeRoute); + memoRelativeRoute=relativeRoute; + + if (_tutorialJson === undefined) { + loadChapitresJson( function( json ) { + currentRelativeRoute = updateContentWithRoute( _tutorialJson = json, relativeRoute); + console.log("currentRelativeRoute", currentRelativeRoute); + }); + } else { + 
currentRelativeRoute = updateContentWithRoute( _tutorialJson, relativeRoute); + } + }); + + + + // init translations then init route + initTutorielTranslations(baseURL, lang, tutorialFolder, function() { + $.router.init(); + }); + + + + // Retour forcé à l'accueil : par exemple, si l'utilisateur recharge une étape intermédiaire + function backHome() { + goRoute('/'); + } + + function goRoute( route ) { + $.router.set({ + route: appBaseRoute + route + }); + } + + + // + // Chargement du JSON des templates + // + + function loadChapitresJson( successCallback ) { + getTutorielJSON( tutorialFolder + '/json/chapitres.json', function( json ) { + if(successCallback) { + successCallback( json ); + } + }); + } + + + // + // Affichage de la template de la route + // + + function updateContentWithRoute( tutoJson, route ) { + if(route=="/comprendre"){ + route="/comprendre/etape1"; + }else{ + } + + console.log("updateContentWithRoute", tutoJson, route); + + // La valeur de la route passée en paramètre peut être modifié ( ex : chapitre ) + let contentDescription = getStepDescriptionForRoute(tutoJson, route); + let relativeRoute = contentDescription.step.route; + + console.log("relativeRoute", contentDescription, relativeRoute); + + // Préparation des données à injecter dans la template + let templateParams = beforeLoadingTemplateJsonForRoute( relativeRoute ); + + // Chargement de la template + displayRoute( tutoJson, appBaseRoute, relativeRoute, $header, $content, $footer, contentDescription, templateParams ); + + // Application des régles liés à la template après chargement + afterInsertingTemplateForRoute(relativeRoute); + + return relativeRoute; + } + + + // + // QCM + // + function qcm_proposal(){ + //console.log(tutorialFolder + '/medias/'+qcm_tabImages[qcm_index]); + //console.log(qcm_tabImages); + //console.log(qcm_index); + if(currentChapter=='tester') { + $(".image-for-qcm").css('background-image', 'url(' +tutorialFolder + '/medias/'+qcm_tabImages[qcm_index]+ ')' ); + $(".zoom-div").css('background-image', 'url(' +tutorialFolder + '/medias/'+qcm_tabImages[qcm_index]+ ')' ); + }else{ + $(".image-for-qcm").css('background-image', 'url(' +tutorialFolder + '/medias/'+qcm_tabImages[qcm_index+5]+ ')' ); + $(".zoom-div").css('background-image', 'url(' +tutorialFolder + '/medias/'+qcm_tabImages[qcm_index+5]+ ')' ); + } + + storeAISteps=restoreSteps(); + qcm_steps=storeAISteps.split(','); + if(qcm_steps.length!=5) { + qcm_steps=[-1,-1,-1,-1,-1]; + }else{ + } + showStepResponses(); + + + } + function showStepResponses(){ + for(i=0;i<5;i++){ + let valStep=parseInt(qcm_steps[i]); + switch(valStep){ + case -1: + break; + case 0: + $(".step-footer ul.navigation li:nth-child("+(i+1)+")").css('background-color','#FF0000'); + break; + case 1: + $(".step-footer ul.navigation li:nth-child("+(i+1)+")").css('background-color','#00FF00'); + break; + } + } + } + function go_qcm(){ + + goRoute('/'+currentChapter+'/etape'+(qcm_index+1)); + } + + + function playVideo(videoElement) { + + if (videoElement) { + + let videoParent = videoElement.closest('.video-parent'); + + let video = videoElement[0]; + video.addEventListener('ended', endOfVideo, false); + + var textTracks = video.textTracks; + if (textTracks && (textTracks.length > 0)) { + var textTrack = textTracks[0]; + textTrack.mode = "showing"; + } + + if (video.paused === true) { + video.play(); + videoParent.addClass('is-playing'); + } else { + video.pause(); + videoParent.removeClass('is-playing'); + } + } + } + + function endOfVideo(e) { + + let video = e.target; 
+ if (video) { + video.removeEventListener('ended', endOfVideo, false); + } + /* + if ($('.navigation-route.next-link a').length) { + $('.navigation-route.next-link a').trigger('click'); + } else if ($('.last-video-before-credits')) { + $('.credits-link a').trigger('click'); + } + */ + } + + + // ======================================================================== +// STORE data +// ======================================================================== + + + function storeData( theArray) { + if (typeof(Storage) !== "undefined") { + // Code for localStorage/sessionStorage. + localStorage.setItem("storeAIImages", theArray); + } else { + // Sorry! No Web Storage support.. + } + } + function storeSteps( theArray) { + if (typeof(Storage) !== "undefined") { + // Code for localStorage/sessionStorage. + localStorage.setItem("storeSteps", theArray); + } else { + // Sorry! No Web Storage support.. + } + } + + function restoreAIImages() { + theData=""; + if (typeof(Storage) !== "undefined") { + // Code for localStorage/sessionStorage. + theData=localStorage.getItem("storeAIImages"); + if(typeof theData != 'string') { + theData=""; + }else{ + + } + } else { + // Sorry! No Web Storage support.. + theData="notsupported"; + } + return theData; + } + function restoreSteps() { + theData=""; + if (typeof(Storage) !== "undefined") { + // Code for localStorage/sessionStorage. + theData=localStorage.getItem("storeSteps"); + if(typeof theData != 'string') { + theData=""; + }else{ + + } + } else { + // Sorry! No Web Storage support.. + theData="notsupported"; + } + return theData; + } + + + +})( jQuery ); diff --git a/assets/js/tuto3.js b/assets/js/tuto3.js index 5f4aff1982e3ec88e37a4131ba2f7ae39ad5cc8f..6cb807cd12320f2f8b7267925470ea9c5bd6c587 100644 --- a/assets/js/tuto3.js +++ b/assets/js/tuto3.js @@ -27,6 +27,8 @@ // Lang : ecrit dans le index.php via PHP let lang = $application.attr('data-lang'); + // Lang file extension used for subtitles + let langExt = lang && lang === 'fr' ? '':'-en'; // Chemins let dataFolder = baseURL + "/data"; @@ -84,14 +86,14 @@ // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-0-preambule.mp4"; templateParams.posterPath = tutorialFolder + "/medias/poster-journal.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-0-preambule.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-0-preambule" + langExt + ".vtt"; break; case '/conclusion': // Chemin de la vidéo à insérer dans la template : templateParams.videoPath = "http://files.inria.fr/mecsci/classcodeIAI/tuto3/mp4/tuto3-conclusion.mp4 "; templateParams.posterPath = tutorialFolder + "/medias/poster-neutre.jpg"; - templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-conclusion.vtt"; + templateParams.subtitlePath = tutorialFolder + "/vtt/tuto3-conclusion" + langExt + ".vtt"; break; diff --git a/data/tuto1/templates/accueil.handlebars b/data/tuto1/templates/accueil.handlebars index 25ba012cd9edd89313831368a8d8e70ce6b70b47..5d729a59bb74eb6d638bd8cb31ce233e9fb76cb4 100644 --- a/data/tuto1/templates/accueil.handlebars +++ b/data/tuto1/templates/accueil.handlebars @@ -1,4 +1,4 @@ -<div class="intro-step tuto-accueil"> +<div class="intro-step tuto-accueil" style="background-image: url('{{i18n "../../assets/images/accueil_tuto1_avec_titre.png"}}')"> <div> <div class="mobile-warning for-mobile"> {{i18n "Cette partie n’est pas adaptée aux appareils mobiles. 
Veuillez la consulter dans Chrome, Firefox ou Microsoft Edge sur ordinateur ou tablette en mode paysage"}} diff --git a/data/tuto1/translations/en.json b/data/tuto1/translations/en.json index 69a37b6f880bc7a0fe2bd54ab04fd75f210348f4..0416cce3fd8a67fa6463be0eaa694c300c525c30 100644 --- a/data/tuto1/translations/en.json +++ b/data/tuto1/translations/en.json @@ -1,5 +1,6 @@ { "values": { + "../../assets/images/accueil_tuto1_avec_titre.png": "../../assets/images/accueil_tuto1_avec_titre-en.png", "Vous avez dit <strong>IA</strong> ?": "You said <strong>AI</strong> ?", "Tester": "Test", "Expérimenter": "Experiment", @@ -71,8 +72,8 @@ "Navigateurs supportés : Edge, Chrome, Mozilla, Safari, Opera": "Supported browsers: Edge, Chrome, Mozilla, Safari, Opera", "Création du json du pré-entrainement du modèle : NE SERT QU'EN PROD": "Creation of the json for the pre-training model:", "Chargement du json du pré-entrainement du modèle : NE SERT QU'EN PROD": "Uploading of the json for the pre-training model:", - "Le présent tutoriel ne stocke aucune donnée personnelle. Les images sont traitées au niveau local sur la machine de l’utilisateur.": "", - "Une production Magic Makers en coproduction avec Inria et S24B l’interactive et class’code avec le soutien de Microsoft.":" ", - "© Cette ressource est sous licence CC BY SA 2020": "" + "Le présent tutoriel ne stocke aucune donnée personnelle. Les images sont traitées au niveau local sur la machine de l’utilisateur.": "This tutorial does not store any personal data. The images are processed locally on the user's machine", + "Une production Magic Makers en coproduction avec Inria et S24B l’interactive et class’code avec le soutien de Microsoft.":"A Magic Makers production in co-production with Inria and S24B l'interactive et class'code with the support of Microsoft", + "© Cette ressource est sous licence CC BY SA 2020": "© This content is under licence CC BY SA 2021" } } diff --git a/data/tuto1/vtt/tuto1-activite1-vid1-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid1-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..a545f086b757ca11e4ca7124b30e18a5de60a0a4 --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid1-en.vtt @@ -0,0 +1,40 @@ +WEBVTT + +00:00:00.333 --> 00:00:02.914 +What is artificial intelligence? + +00:00:03.541 --> 00:00:06.666 +AI, or artificial intelligence, +is a collection of IT tools + +00:00:06.750 --> 00:00:10.083 +that mimic actions +previously only done by humans. + +00:00:10.370 --> 00:00:12.515 +They are most often cognitive actions. + +00:00:12.583 --> 00:00:16.084 +For example, recognising what is +in an image or even translating a text. + +00:00:16.509 --> 00:00:18.020 +We can find it all around us! + +00:00:18.083 --> 00:00:20.729 +Our phone recognising faces on our photos, + +00:00:20.790 --> 00:00:24.174 +translation sites that immediately +recognise the language being spoken. + +00:00:24.521 --> 00:00:29.382 +Social media apps that can recognise faces +on our photos so we can add rabbit ears. + +00:00:29.458 --> 00:00:32.179 +Voice assistants +who we can speak to. Great, right? + +00:00:32.497 --> 00:00:33.685 +Let's take a closer look. \ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid2-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid2-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..5ba8ef06222273b20d947581a2edced18d433e1d --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid2-en.vtt @@ -0,0 +1,39 @@ +WEBVTT + +00:00:00.167 --> 00:00:01.250 +Magic? 
+ +00:00:02.312 --> 00:00:05.660 +For a program to learn to recognise +images, you have to train it first. + +00:00:06.014 --> 00:00:09.330 +This is what is known +as "machine learning." + +00:00:09.992 --> 00:00:12.250 +We'll show it examples, +and tell it what they are. + +00:00:12.640 --> 00:00:15.312 +This is an image of a cat. +This is an image of a dog. + +00:00:15.375 --> 00:00:18.625 +We'll show it thousands +for each thing we want it to recognise. + +00:00:19.167 --> 00:00:21.417 +At the start it will make +a lot of mistakes. + +00:00:21.458 --> 00:00:24.916 +But after seeing plenty of examples, +it will recognise an image of a cat, + +00:00:24.937 --> 00:00:26.958 +even on a photo that it has never seen. + +00:00:27.042 --> 00:00:29.583 +We're now going +to be able to train our own AI! \ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid3-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid3-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..9712438c7a83f3ac56a5654d6f6169baadaab0c2 --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid3-en.vtt @@ -0,0 +1,47 @@ +WEBVTT + +00:00:00.000 --> 00:00:02.225 +We just trained our first program! + +00:00:02.708 --> 00:00:05.833 +Well done! +As you could see, it wasn't magic. + +00:00:06.208 --> 00:00:09.751 +First, we told it what we were showing it. +Then we showed it examples of images. + +00:00:10.000 --> 00:00:11.458 +That's the learning phase. + +00:00:11.789 --> 00:00:13.854 +We call the program +we trained "the model". + +00:00:14.610 --> 00:00:17.790 +It can now predict +the category an image belongs to. + +00:00:18.242 --> 00:00:22.040 +Its prediction is still a statistical +approximation, hence the percentage. + +00:00:22.458 --> 00:00:24.625 +According to its calculation, +there's more chance + +00:00:24.687 --> 00:00:27.208 +that this image belongs +to one category over another. + +00:00:27.994 --> 00:00:29.625 +But can it recognise everything? + +00:00:29.917 --> 00:00:32.333 +If we teach it to recognise cats or dogs, + +00:00:32.416 --> 00:00:34.479 +can it recognise lions or tigers? + +00:00:34.541 --> 00:00:36.083 +Can our program get it wrong? \ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid4-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid4-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..80fb7353ff836030dacda553a4a1a20ebf04162e --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid4-en.vtt @@ -0,0 +1,36 @@ +WEBVTT + +00:00:00.521 --> 00:00:04.312 +Our program can only do +what it has been trained to do. + +00:00:04.708 --> 00:00:07.208 +The program can only recognise +what we've shown it. + +00:00:07.527 --> 00:00:11.390 +If we teach it to recognise cats or dogs, +it won't be able to recognise lions. + +00:00:11.458 --> 00:00:14.416 +But it can tell us +which category it is closer to. + +00:00:14.500 --> 00:00:18.042 +Cats? It is only as smart +as we taught it to be. + +00:00:18.126 --> 00:00:21.226 +When we discover something new, +we don't know what it is straight away. + +00:00:21.672 --> 00:00:26.160 +But once we learn, we can recognise it +in different positions or contexts. + +00:00:26.632 --> 00:00:28.250 +Is it the same for our program? + +00:00:28.792 --> 00:00:32.390 +If we teach it to recognise lions, +will it recognise soft toy lions? 
\ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid5-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid5-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..2e6e2a9dff58f5cdbdacea019191c10534a12101 --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid5-en.vtt @@ -0,0 +1,41 @@ +WEBVTT + +00:00:00.167 --> 00:00:01.429 +It can get things wrong! + +00:00:01.760 --> 00:00:03.178 +It is just a prediction. + +00:00:03.992 --> 00:00:06.990 +Being able to recognise things +in different positions or contexts + +00:00:07.021 --> 00:00:08.541 +is called generalisation. + +00:00:08.583 --> 00:00:11.875 +That's what we try to do when we train +a program to recognise things. + +00:00:12.218 --> 00:00:16.040 +We are very good at generalising +because we are quite approximate. + +00:00:16.654 --> 00:00:19.104 +Our program is very powerful +because it is very precise. + +00:00:19.490 --> 00:00:21.367 +But it is less good at generalising. + +00:00:22.156 --> 00:00:26.660 +However, if we train it correctly +with thousands of varied examples, + +00:00:27.047 --> 00:00:29.830 +in some cases, it can be +more precise than humans. + +00:00:29.875 --> 00:00:35.646 +Today, we have programs that can read +medical images more reliably than humans. \ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid6-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid6-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..29ffe2ab53c6a0301ca1171f887a289eaa41fc2f --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid6-en.vtt @@ -0,0 +1,32 @@ +WEBVTT + +00:00:00.130 --> 00:00:02.054 +What can we do with this program? + +00:00:02.487 --> 00:00:05.729 +Now we know +how to train artificial intelligence, + +00:00:05.791 --> 00:00:08.125 +we can train it with whatever we want. + +00:00:08.740 --> 00:00:10.778 +Can I train a program +to tell the difference + +00:00:10.812 --> 00:00:12.385 +between my cup and my glass? + +00:00:12.917 --> 00:00:15.099 +Between a closed hand and an open hand? + +00:00:15.658 --> 00:00:17.792 +Between my blue shirt and a red T-shirt? + +00:00:17.875 --> 00:00:21.242 +You can choose your two categories +and your ten examples for each. + +00:00:21.750 --> 00:00:24.195 +Make sure that +your two categories are distinct. \ No newline at end of file diff --git a/data/tuto1/vtt/tuto1-activite1-vid7-en.vtt b/data/tuto1/vtt/tuto1-activite1-vid7-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..545d647ced54396c1404e504a5551c9de249f11b --- /dev/null +++ b/data/tuto1/vtt/tuto1-activite1-vid7-en.vtt @@ -0,0 +1,75 @@ +WEBVTT + +00:00:00.125 --> 00:00:02.352 +Impressive! But how is it useful? + +00:00:02.583 --> 00:00:04.917 +Impressive, right? We agree. + +00:00:05.001 --> 00:00:07.200 +The program doesn't really see +our categories. + +00:00:07.469 --> 00:00:10.521 +But it associates the label we chose +to the example we showed it. + +00:00:10.562 --> 00:00:13.208 +This ability to recognise things +can be very useful. + +00:00:13.445 --> 00:00:15.910 +We're finding more +and more AI in our daily lives. + +00:00:15.994 --> 00:00:19.910 +It lets us automatically translate text, +communicate with voice assistants + +00:00:20.210 --> 00:00:22.807 +or even improve medical diagnoses. + +00:00:23.083 --> 00:00:26.917 +Computer engineers even use it +to try and build autonomous cars! 
+ +00:00:27.000 --> 00:00:30.146 +But many uses are still to be invented +to make the world of tomorrow + +00:00:30.208 --> 00:00:32.958 +fairer, simpler. +more beautiful and more sustainable. + +00:00:33.042 --> 00:00:36.333 +Helping people +with disabilities, for example, + +00:00:36.417 --> 00:00:39.208 +with applications +that describe the environment + +00:00:39.292 --> 00:00:40.708 +for people with vision impairments, + +00:00:40.792 --> 00:00:43.213 +or allowing paralysed people to write, + +00:00:43.297 --> 00:00:48.166 +making work simpler and more interesting, +by automating some repetitive tasks. + +00:00:48.250 --> 00:00:53.792 +reducing climate change by making +more accurate predictions for the future. + +00:00:54.109 --> 00:00:58.720 +What about us? Now you know what AI is, +but what else can we do with it? + +00:00:59.250 --> 00:01:01.739 +What solutions can we invent +for the world of tomorrow? + +00:01:01.840 --> 00:01:04.229 +How can we use it every day to help us? \ No newline at end of file diff --git a/data/tuto2/templates/accueil.handlebars b/data/tuto2/templates/accueil.handlebars index 4b29f858ab0ac40d2c89a8741410957721792056..9714223c1e50c4eb43bebb4fad3aa4f9a5037071 100644 --- a/data/tuto2/templates/accueil.handlebars +++ b/data/tuto2/templates/accueil.handlebars @@ -1,4 +1,4 @@ -<div class="intro-step tuto-accueil"> +<div class="intro-step tuto-accueil" style="background-image: url('{{i18n "../../assets/images/accueil_tuto2_avec_titre.png"}}')"> <div> <div class="mobile-warning for-mobile"> Cette partie n’est pas adaptée aux appareils mobiles. Veuillez la consulter dans Chrome, Firefox ou Microsoft Edge sur ordinateur diff --git a/data/tuto2/translations/en.json b/data/tuto2/translations/en.json index e9cf6b6d268559a5834ed1de6b8304b0f6b5960c..780f32f080167a9c37e1d04dd798600ad71f7112 100644 --- a/data/tuto2/translations/en.json +++ b/data/tuto2/translations/en.json @@ -1,70 +1,73 @@ { "values": { - "Boosté à l'": "", - "IA": "", - "Introduction": "", - "Tester 1/2": "", - "Des algorithmes et des données": "", - "Ce programme a été entraîné à reconnaître des hommes et des femmes. Que remarque-t-on ?": "", - "Demander au programme ce que les images représentent. Choisir une image dans la bibliothèque puis cliquer sur ": "", - "Tester": "", - "Bibliothèque": "", - "Programme": "", - "Tester 2/2": "", - "Sexiste ? Et si c'était nos données...": "", - "Expérimenter 1/3": "", - "De l'art de préparer les données...": "", - "Entraînons notre programme à reconnaître des hommes et des femmes.": "", - "Sélectionner les images dans la Bibliothèque pour la catégorie Femme puis pour la catégorie Homme.": "", - "Femme": "", - "Valider": "", - "Prédiction": "", - "Homme": "", - "Entraînons le programme pour qu’il associe les exemples d’images aux catégories. Cliquer sur ": "", - "Entraîner": "", - "Avons-nous correctement entraîné notre programme ? Que voit-il ? Pourquoi ?": "", - "Sélectionner une image dans la Bibliothèque et la tester. 
Répéter l’opération autant de fois que désiré puis cliquer sur ": "", - "Suite": "", - "Jeu de données": "", - "Expérimenter 2/3": "", - "Des biais dans les jeux de données !": "", - "A quelles images de mon jeu de données s’est-il référé pour tirer cette conclusion ?": "", - "Tester le programme et identifier les similarités dans le jeu de données d'entraînement puis cliquer sur ": "", - "Modifier": "", - " si nécessaire.": "", - "Comment puis-je améliorer mon jeu de données ?": "", - "Modifier les images en cliquant dessus, puis les remplacer par une image de la bibliothèque ou importée depuis l'ordinateur ou encore prendre une photo. Cliquer sur ": "", - " pour donner les images au programme.": "", - "Prendre une photo": "", - "Notre programme comprend-il encore des biais ?": "", - "Tester le programme et faire autant d’aller-retours que nécessaire pour l’améliorer.": "", - "Expérimenter 3/3": "", - "Maîtriser les jeux de données.": "", - "Créer 1/2": "", - "Et si on jouait avec les données !": "", - "Utile, rigolo, trompeur ? Que voulons-nous faire dire à notre programme ?": "", - "Créons notre set de données et entraînons notre programme avant de le tester. A recommencer sans modération :-)": "", - "Conclure": "", - "Tromper la machine !": "", - "Production et réalisation": "", - "Magic Makers pour Class’Code IAI.": "", - "www.magicmakers.fr": "", - "Jade Becker, ": "", - "Conception et réalisation.": "", - "Claude Terosier et Romain Liblau, ": "", - "Conseil pédagogique.": "", - "Veronica Holguin, ": "", - "Graphisme.": "", - "Benjamin Ninassi et Denis Chiron, ": "", - "Développement.": "", - "Sophie de Quatrebarbes, ": "", - "Réalisation et suivi de production.": "", - "Données, logiciels, etc...": "", - "Hébergement : Inria Rocquencourt.": "", - "Algorithme de machine learning : ML5.": "", - "Navigateurs supportés : Edge, Chrome, Mozilla, Safari, Opera": "", - "Préparation de l'entrainement homme-femme": "", - "Enregistrer": "", + "../../assets/images/accueil_tuto2_avec_titre.png": "../../assets/images/accueil_tuto2_avec_titre-en.png", + "Boosté à l'": "Boosted with", + "IA": "AI", + "Introduction": "Introduction", + "Expérimenter": "Experiment", + "Créer": "Create", + "Tester 1/2": "Test 1/2", + "Des algorithmes et des données": "Algorithms and data", + "Ce programme a été entraîné à reconnaître des hommes et des femmes. Que remarque-t-on ?": "This programme has been trained to recognise men and women. What do we notice?", + "Demander au programme ce que les images représentent. Choisir une image dans la bibliothèque puis cliquer sur ": "Ask the program what the images represent. Choose an image from the gallery and click on ", + "Tester": "Test", + "Bibliothèque": "Gallery", + "Programme": "Programme", + "Tester 2/2": "Test 2/2", + "Sexiste ? Et si c'était nos données...": "Sexist? What if it's our data?", + "Expérimenter 1/3": "Experiment 1/3", + "De l'art de préparer les données...": "The art of data preparation", + "Entraînons notre programme à reconnaître des hommes et des femmes.": "Let's train our programme to recognise men and women.", + "Sélectionner les images dans la Bibliothèque pour la catégorie Femme puis pour la catégorie Homme.": "Select the images in the gallery for the Women's category and then for the Men's category.", + "Femme": "Women", + "Valider": "Validate", + "Prédiction": "Prediction", + "Homme": "Men", + "Entraînons le programme pour qu’il associe les exemples d’images aux catégories. 
Cliquer sur ": "Let's train the programme to associate the sample images with the categories. Click on", + "Entraîner": "Train", + "Avons-nous correctement entraîné notre programme ? Que voit-il ? Pourquoi ?": "Have we trained our programme correctly? What does it see? Why does it see it?", + "Sélectionner une image dans la Bibliothèque et la tester. Répéter l’opération autant de fois que désiré puis cliquer sur ": "Select an image in the gallery and test it. Repeat the operation as many times as desired and click on ", + "Suite": "Continued", + "Jeu de données": "Data set", + "Expérimenter 2/3": "Experiment 2/3", + "Des biais dans les jeux de données !": "Biases in the data sets!", + "A quelles images de mon jeu de données s’est-il référé pour tirer cette conclusion ?": "Which images in my dataset did it refer to in drawing this conclusion?", + "Tester le programme et identifier les similarités dans le jeu de données d'entraînement puis cliquer sur ": "Test the programme and identify similarities in the training dataset and click on ", + "Modifier": "Modify", + " si nécessaire.": " if necessary.", + "Comment puis-je améliorer mon jeu de données ?": "How can I improve my dataset?", + "Modifier les images en cliquant dessus, puis les remplacer par une image de la bibliothèque ou importée depuis l'ordinateur ou encore prendre une photo. Cliquer sur ": "Modify the images by clicking on them, then replace them with an image from the gallery or imported from the computer or take a photo. Click on ", + " pour donner les images au programme.": " to give the images to the programme.", + "Prendre une photo": "Take a picture", + "Notre programme comprend-il encore des biais ?": "Is there still a bias in our programme?", + "Tester le programme et faire autant d’aller-retours que nécessaire pour l’améliorer.": "Test the programme and go back and forth as necessary to improve it.", + "Expérimenter 3/3": "Experiment 3/3", + "Maîtriser les jeux de données.": "Mastering the data sets.", + "Créer 1/2": "Create 1/2", + "Et si on jouait avec les données !": "Let's play with the data!", + "Utile, rigolo, trompeur ? Que voulons-nous faire dire à notre programme ?": "Useful, funny, misleading? What do we want our programme to say?", + "Créons notre set de données et entraînons notre programme avant de le tester. A recommencer sans modération :-)": "Let's create our data set and train our programme before testing it. 
To be repeated over and over :-)", + "Conclure": "Conclude", + "Tromper la machine !": "Fooling the machine!", + "Production et réalisation": "Production and Direction", + "Magic Makers pour Class’Code IAI.": "Magic Makers for Class’Code.", + "www.magicmakers.fr": "www.magicmakers.fr", + "Jade Becker, ": "Jade Becker, ", + "Conception et réalisation.": "Design and production.", + "Claude Terosier et Romain Liblau, ": "Claude Terosier and Romain Liblau, ", + "Conseil pédagogique.": "Educational Board.", + "Veronica Holguin, ": "Veronica Holguin, ", + "Graphisme.": "Graphic design.", + "Benjamin Ninassi et Denis Chiron, ": "Benjamin Ninassi and Denis Chiron, ", + "Développement.": "Development.", + "Sophie de Quatrebarbes, ": "Sophie de Quatrebarbes, ", + "Réalisation et suivi de production.": "Production and follow-up.", + "Données, logiciels, etc...": "Data, software, etc...", + "Hébergement : Inria Rocquencourt.": "Hosted by: Inria Rocquencourt.", + "Algorithme de machine learning : ML5.": "Machine learning algorithm: ML5.", + "Navigateurs supportés : Edge, Chrome, Mozilla, Safari, Opera": "Supported browsers: Edge, Chrome, Mozilla, Safari, Opera", + "Préparation de l'entrainement homme-femme": "Preparation of the man-woman training", + "Enregistrer": "Save", "Importer une photo": "Import a picture" } } diff --git a/data/tuto2/vtt/tuto2-activite1-vid1-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid1-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..5f7c5175b30d00eb6d28b369668cd7ccf47fb29b --- /dev/null +++ b/data/tuto2/vtt/tuto2-activite1-vid1-en.vtt @@ -0,0 +1,41 @@ +WEBVTT + +00:00:00.333 --> 00:00:02.031 +Algorithms and data. + +00:00:02.370 --> 00:00:05.187 +Today, when we talk about AI +or artificial intelligence, + +00:00:05.229 --> 00:00:07.542 +we're often talking +about machine learning. + +00:00:07.736 --> 00:00:10.403 +Unlike algorithms, +which were used previously + +00:00:10.729 --> 00:00:14.375 +and which involved describing +an operation step-by-step, + +00:00:14.792 --> 00:00:16.792 +a bit like a recipe, + +00:00:16.875 --> 00:00:20.312 +machine learning involves +training a program + +00:00:20.363 --> 00:00:22.218 +to make predictions from data. + +00:00:22.654 --> 00:00:26.200 +We use it, for example, +to predict what a user will like + +00:00:26.229 --> 00:00:28.562 +based on what they have +already liked or viewed. + +00:00:28.792 --> 00:00:29.875 +Let's test it out! \ No newline at end of file diff --git a/data/tuto2/vtt/tuto2-activite1-vid2-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid2-en.vtt new file mode 100644 index 0000000000000000000000000000000000000000..ae6399e1abb9deaa5ade8d2d7761cb97a56182ae --- /dev/null +++ b/data/tuto2/vtt/tuto2-activite1-vid2-en.vtt @@ -0,0 +1,44 @@ +WEBVTT + +00:00:00.167 --> 00:00:02.757 +Sexist? What if it is our data? + +00:00:03.250 --> 00:00:06.250 +You surely noticed +that our model recognised + +00:00:06.291 --> 00:00:10.390 +everyone with long hair as women +and everyone with short hair as men. + +00:00:10.917 --> 00:00:15.292 +Without realising, we provided data +that was sorted by criteria + +00:00:15.376 --> 00:00:16.751 +other than what we had defined. + +00:00:17.432 --> 00:00:20.410 +The data sorting was influenced +by our perception of the issue. + +00:00:20.836 --> 00:00:23.660 +When that happens, +we say that the data is biased. 
+
+00:00:23.917 --> 00:00:26.080
+AI algorithms have existed for a long time
+
+00:00:26.322 --> 00:00:29.267
+but they didn't work as well
+because we didn't have labelled data.
+
+00:00:29.739 --> 00:00:31.994
+Today, a lot of data is available.
+
+00:00:32.429 --> 00:00:34.410
+And that's why AI is so widespread.
+
+00:00:34.750 --> 00:00:37.817
+But you should be careful
+because the data may be biased.
\ No newline at end of file
diff --git a/data/tuto2/vtt/tuto2-activite1-vid3-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid3-en.vtt
new file mode 100644
index 0000000000000000000000000000000000000000..7b38c3d6967c734324d42f84bfbdb2f1cbb405ff
--- /dev/null
+++ b/data/tuto2/vtt/tuto2-activite1-vid3-en.vtt
@@ -0,0 +1,29 @@
+WEBVTT
+
+00:00:00.292 --> 00:00:02.386
+The art of data preparation.
+
+00:00:02.789 --> 00:00:07.083
+Preparing the data for AI
+is at least 70% of the work.
+
+00:00:07.167 --> 00:00:09.958
+Data is very important
+for a program to learn.
+
+00:00:10.392 --> 00:00:13.870
+As we saw earlier,
+the machine only learns what we show it.
+
+00:00:14.208 --> 00:00:18.470
+We therefore have to spend time selecting
+the data and preparing it properly
+
+00:00:18.521 --> 00:00:19.875
+to get good results.
+
+00:00:20.321 --> 00:00:22.658
+But it is not always as easy as it looks!
+
+00:00:23.246 --> 00:00:25.796
+Let's take a closer look and experiment!
\ No newline at end of file
diff --git a/data/tuto2/vtt/tuto2-activite1-vid4-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid4-en.vtt
new file mode 100644
index 0000000000000000000000000000000000000000..6826e9d35652612aa3e4f9ea47ec99704f2a4131
--- /dev/null
+++ b/data/tuto2/vtt/tuto2-activite1-vid4-en.vtt
@@ -0,0 +1,42 @@
+WEBVTT
+
+00:00:00.542 --> 00:00:02.023
+Biases in data.
+
+00:00:02.574 --> 00:00:04.533
+The program doesn't see things the way we do.
+
+00:00:04.617 --> 00:00:07.080
+It doesn't know
+the concept of man and woman.
+
+00:00:07.164 --> 00:00:10.250
+It makes what are called
+statistical approximations.
+
+00:00:10.984 --> 00:00:15.124
+Is this image closer
+to images labelled "women"
+
+00:00:15.208 --> 00:00:16.833
+or images labelled "men"?
+
+00:00:16.917 --> 00:00:18.204
+What does it see?
+
+00:00:18.458 --> 00:00:20.852
+The blue background, skin colour?
+
+00:00:21.208 --> 00:00:22.542
+A pair of glasses?
+
+00:00:22.836 --> 00:00:26.305
+The way we choose our input data is key.
+
+00:00:27.042 --> 00:00:29.917
+As we know, choosing the data
+is a big responsibility.
+
+00:00:30.495 --> 00:00:34.871
+Now, let's try to correct
+our data set to eliminate the biases.
\ No newline at end of file
diff --git a/data/tuto2/vtt/tuto2-activite1-vid5-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid5-en.vtt
new file mode 100644
index 0000000000000000000000000000000000000000..cdd16c659cecfd758555efb4d89234f4b3286dfb
--- /dev/null
+++ b/data/tuto2/vtt/tuto2-activite1-vid5-en.vtt
@@ -0,0 +1,38 @@
+WEBVTT
+
+00:00:00.125 --> 00:00:02.500
+Mastering data sets.
+
+00:00:02.738 --> 00:00:06.062
+Artificial intelligence can only recognise
+what we've taught it.
+
+00:00:06.375 --> 00:00:10.375
+The data used to train the program has
+a strong influence on the results.
+
+00:00:10.458 --> 00:00:14.328
+Mastering the data is key
+to mastering this technology
+
+00:00:14.412 --> 00:00:16.104
+and the results you get from it.
+
+00:00:16.150 --> 00:00:19.851
+You should always be vigilant when looking
+at the results from an AI program
+
+00:00:20.250 --> 00:00:23.916
+and always ask
+where the data used to train it is from.
+
+00:00:24.000 --> 00:00:28.521
+Deliberately or not, it can contain biases
+that are mechanically reproduced
+
+00:00:28.574 --> 00:00:30.291
+and which have big consequences.
+
+00:00:30.375 --> 00:00:33.542
+In job recruitment, for example,
+or even access to a loan.
\ No newline at end of file
diff --git a/data/tuto2/vtt/tuto2-activite1-vid6-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid6-en.vtt
new file mode 100644
index 0000000000000000000000000000000000000000..7b6546ef1e239c6b004d545e15ef1c00f4b7b41d
--- /dev/null
+++ b/data/tuto2/vtt/tuto2-activite1-vid6-en.vtt
@@ -0,0 +1,64 @@
+WEBVTT
+
+00:00:00.458 --> 00:00:03.659
+What if we played with the data
+to trick the machine?
+
+00:00:04.296 --> 00:00:08.405
+The program will make predictions
+based on the categories that we defined
+
+00:00:08.437 --> 00:00:10.160
+and the examples we showed it.
+
+00:00:10.458 --> 00:00:12.750
+Once it is trained, we can trick it.
+
+00:00:12.821 --> 00:00:17.578
+For example, certain jewellery
+can deceive facial recognition
+
+00:00:18.160 --> 00:00:20.406
+and even some signs with patterns
+
+00:00:21.105 --> 00:00:24.398
+can get around
+automatic video surveillance.
+
+00:00:24.750 --> 00:00:27.916
+But as we know, we can also
+trick the machine with our data.
+
+00:00:28.000 --> 00:00:32.406
+For example, we can train a program
+to recognise "beautiful" or "ugly" people
+
+00:00:32.833 --> 00:00:35.236
+by only showing it
+what we think are beautiful people.
+
+00:00:35.832 --> 00:00:42.500
+Generally speaking, subjective categories,
+cute, not cute, stupid, intelligent, etc.,
+
+00:00:42.520 --> 00:00:44.875
+depend on the person preparing the data.
+
+00:00:45.247 --> 00:00:49.830
+We can also train a program to recognise
+whether there are people in an image
+
+00:00:50.379 --> 00:00:53.886
+by putting all our examples
+in the "no people" category.
+
+00:00:54.167 --> 00:00:55.818
+This way you can go incognito!
+
+00:00:55.902 --> 00:00:59.492
+Now it's your turn to hijack the data
+to create a biased program.
+
+00:01:00.042 --> 00:01:02.250
+How can we trick
+the program with our data?
\ No newline at end of file
diff --git a/data/tuto2/vtt/tuto2-activite1-vid7-en.vtt b/data/tuto2/vtt/tuto2-activite1-vid7-en.vtt
new file mode 100644
index 0000000000000000000000000000000000000000..9f8d6f87c391f4b6e27d087aa52c9b8b6e94aea2
--- /dev/null
+++ b/data/tuto2/vtt/tuto2-activite1-vid7-en.vtt
@@ -0,0 +1,58 @@
+WEBVTT
+
+00:00:00.167 --> 00:00:01.417
+Trick the machine!
+
+00:00:01.500 --> 00:00:05.667
+As you saw, data plays
+an essential role in AI learning.
+
+00:00:05.750 --> 00:00:07.791
+We can easily trick the machine.
+
+00:00:07.875 --> 00:00:13.330
+To train AI, we need thousands
+of examples and to know what they are.
+
+00:00:13.796 --> 00:00:19.080
+Today, we can easily find
+examples with labels
+
+00:00:19.365 --> 00:00:20.958
+to solve our AI problems.
+
+00:00:21.042 --> 00:00:25.333
+You can create your own data set
+or use ready-to-use ones.
+
+00:00:25.417 --> 00:00:28.167
+But, as you now know,
+you need to be careful with your data.
+
+00:00:28.251 --> 00:00:30.578
+Because you can easily get biased data
+
+00:00:30.917 --> 00:00:33.958
+if you don't ask the right questions
+or if you don't sort it properly.
+
+00:00:34.042 --> 00:00:35.583
+There are two types of bias:
+
+00:00:35.667 --> 00:00:40.083
+processing or statistical bias,
+from badly prepared data,
+
+00:00:40.167 --> 00:00:43.458
+and social or cognitive bias,
+which is human bias,
+
+00:00:43.554 --> 00:00:44.999
+for example, the gender issue.
+
+00:00:45.083 --> 00:00:48.296
+In the end, AI is a human creation
+and is what we put into it:
+
+00:00:48.542 --> 00:00:50.140
+good intentions and bias.
\ No newline at end of file
diff --git a/data/tuto3-1/translations/en.json b/data/tuto3-1/translations/en.json
index 50a4f1ddb1f9931c0ef8d6be3c119d27d68ff273..dcd93fd4e584784a24205acac5cbabe7168eb8eb 100644
--- a/data/tuto3-1/translations/en.json
+++ b/data/tuto3-1/translations/en.json
@@ -1,50 +1,51 @@
 {
   "values": {
-    "Introduction": "",
-    "Débusquer l’IA : IA ou humains ?": "",
-    "Un être humain": "",
-    "Une IA": "",
-    "S'agit-il d'un vrai être humain ou d'une image créée par une IA ? ": "",
-    " Cliquez sur la bonne réponse.": "",
-    "Comprendre": "",
-    "zoomer": "",
-    "Etes-vous un bon réseau de neurones discriminateur ? ": "",
-    " Cliquez sur la bonne réponse. Zoomez dans l’image pour débusquer l’IA.": "",
-    "Conclure": "",
-    "En savoir encore plus": "",
-    "GAN (": "",
-    "generative adversarial network": "",
-    "StyleGAN": "",
-    " (Dec 2018) - ": "",
-    "Karras": "",
-    " et al. and Nvidia": "",
-    "Original GAN": "",
-    " (2014) - ": "",
-    "Goodfellow": "",
-    " et al.": "",
-    "Don't panic. Learn about ": "",
-    "how it works": "",
-    "https://usbeketrica.com/article/ia-tableau-art-buzzword": "",
-    "thispersondoesnotexist.com/": "",
-    "https://pixabay.com/fr/": "",
-    "Production et réalisation": "",
-    "Data.bingo pour Class’Code IAI.": "",
-    "data.bingo": "",
-    "Thu Trinh-Bouvier, Bastien Didier, Julien Levesque, Albertine Meunier et Sylvie Tissot, ": "",
-    "Conception.": "",
-    "Bastien Didier et Sylvie Tissot,": "",
-    "Développement.": "",
-    " ": "",
-    "Benjamin Ninassi et Denis Chiron,": "",
-    "Architecture technique.": "",
-    "Veronica Holguin, ": "",
-    "Graphisme.": "",
-    "Sophie de Quatrebarbes et Sonia Cruchon, ": "",
-    "Réalisation et suivi de production.": "",
-    "Données, logiciels, etc...": "",
-    "Hébergement : Inria Rocquencourt.": "",
-    "Algorithmes : ": "",
-    "ML5, Yolo, Web speech Recognition, Web speech synthesis": "",
-    "Navigateur supporté : Chrome": ""
+    "Introduction": "Introduction",
+    "Débusquer l’IA : IA ou humains ?": "Detecting AI: AI or humans?",
+    "Un être humain": "A human being",
+    "Une IA": "An AI",
+    "S'agit-il d'un vrai être humain ou d'une image créée par une IA ? ": "Is it a real human being or an image created by an AI? ",
+    " Cliquez sur la bonne réponse.": "Click on the correct answer.",
+    "Vérifier": "Verify",
+    "Comprendre": "Understand",
+    "zoomer": "zoom in",
+    "Etes-vous un bon réseau de neurones discriminateur ? ": "Are you a good discriminator neural network? ",
+    " Cliquez sur la bonne réponse. Zoomez dans l’image pour débusquer l’IA.": "Click on the correct answer. Zoom in on the image to find the AI.",
+    "Conclure": "Conclusion",
+    "En savoir encore plus": "Find out more",
+    "GAN (": "GAN (",
+    "generative adversarial network": "generative adversarial network",
+    "StyleGAN": "StyleGAN",
+    " (Dec 2018) - ": " (Dec 2018) - ",
+    "Karras": "Karras",
+    " et al. and Nvidia": " et al. and Nvidia",
+    "Original GAN": "Original GAN",
+    " (2014) - ": " (2014) - ",
+    "Goodfellow": "Goodfellow",
+    " et al.": " et al.",
+    "Don't panic. Learn about ": "Don't panic. Learn about ",
Learn about", + "how it works": "how it works", + "https://usbeketrica.com/article/ia-tableau-art-buzzword": "https://usbeketrica.com/article/ia-tableau-art-buzzword", + "thispersondoesnotexist.com/": "thispersondoesnotexist.com/", + "https://pixabay.com/fr/": "https://pixabay.com/en/", + "Production et réalisation": "Production and realisation", + "Data.bingo pour Class’Code IAI.": "Data.bingo for Class'Code IAI.", + "data.bingo": "data.bingo", + "Thu Trinh-Bouvier, Bastien Didier, Julien Levesque, Albertine Meunier et Sylvie Tissot, ": "Thu Trinh-Bouvier, Bastien Didier, Julien Levesque, Albertine Meunier and Sylvie Tissot, ", + "Conception.": "Design.", + "Bastien Didier et Sylvie Tissot,": "Bastien Didier and Sylvie Tissot,", + "Développement.": "Development.", + " ": " ", + "Benjamin Ninassi et Denis Chiron,": "Benjamin Ninassi and Denis Chiron", + "Architecture technique.": "Technical architecture.", + "Veronica Holguin, ": "Veronica Holguin, ", + "Graphisme.": "Graphics.", + "Sophie de Quatrebarbes et Sonia Cruchon, ": "Sophie de Quatrebarbes and Sonia Cruchon, ", + "Réalisation et suivi de production.": "Production and follow-up.", + "Données, logiciels, etc...": "Data, software, etc...", + "Hébergement : Inria Rocquencourt.": "Hosting: Inria Rocquencourt.", + "Algorithmes : ": "Algorithms: ", + "ML5, Yolo, Web speech Recognition, Web speech synthesis": "ML5", + "Navigateur supporté : Chrome": "Supported browser: Chrome" } }