diff --git a/data/grid5000/sites/grenoble/clusters/dahu/dahu.json b/data/grid5000/sites/grenoble/clusters/dahu/dahu.json
index 416dab1e51ff3ded02b3a566bb1fd1c95a2692cd..4c65a39bb6221d6683f0bb4d38a1af864bda96ca 100644
--- a/data/grid5000/sites/grenoble/clusters/dahu/dahu.json
+++ b/data/grid5000/sites/grenoble/clusters/dahu/dahu.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Thu, 22 Mar 2018 11:00:00 GMT",
+  "created_at": "Thu, 22 Mar 2018 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6420",
   "priority": "201800",
diff --git a/data/grid5000/sites/grenoble/clusters/yeti/yeti.json b/data/grid5000/sites/grenoble/clusters/yeti/yeti.json
index 6ec47d99d00e19486860d8f214b915fa8a4efd3a..dd5edeaddd3d52a0c98422197cd5c5f1b647cd56 100644
--- a/data/grid5000/sites/grenoble/clusters/yeti/yeti.json
+++ b/data/grid5000/sites/grenoble/clusters/yeti/yeti.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 16 Jan 2018 15:00:00 GMT",
+  "created_at": "Tue, 16 Jan 2018 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R940",
   "queues": [
diff --git a/data/grid5000/sites/lille/clusters/chetemi/chetemi.json b/data/grid5000/sites/lille/clusters/chetemi/chetemi.json
index d5b78b31ed96c7801342e83edaf6bf4e78d5f82e..38162ae3ec6c09ed4aedaa8a2961a0b1666965d4 100644
--- a/data/grid5000/sites/lille/clusters/chetemi/chetemi.json
+++ b/data/grid5000/sites/lille/clusters/chetemi/chetemi.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Wed, 30 Nov 2016 23:00:00 GMT",
+  "created_at": "Thu, 01 Dec 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "priority": "201650",
diff --git a/data/grid5000/sites/lille/clusters/chiclet/chiclet.json b/data/grid5000/sites/lille/clusters/chiclet/chiclet.json
index d3bb2fac94ff49b75dbf3e5df5b8336a24d05340..3435d67a200deedebc6ddfd7b8932982d4db01ee 100644
--- a/data/grid5000/sites/lille/clusters/chiclet/chiclet.json
+++ b/data/grid5000/sites/lille/clusters/chiclet/chiclet.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sun, 05 Aug 2018 22:00:00 GMT",
+  "created_at": "Mon, 06 Aug 2018 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R7425",
   "priority": "201850",
diff --git a/data/grid5000/sites/lille/clusters/chifflet/chifflet.json b/data/grid5000/sites/lille/clusters/chifflet/chifflet.json
index 84c31597cc1bd5561ee60b6c05ebf7d13ed5a02a..5324770851dfb258638bae348c98893dc94e42f8 100644
--- a/data/grid5000/sites/lille/clusters/chifflet/chifflet.json
+++ b/data/grid5000/sites/lille/clusters/chifflet/chifflet.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Wed, 30 Nov 2016 23:00:00 GMT",
+  "created_at": "Thu, 01 Dec 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R730",
   "priority": "201670",
diff --git a/data/grid5000/sites/lille/clusters/chifflot/chifflot.json b/data/grid5000/sites/lille/clusters/chifflot/chifflot.json
index bd7b779851453d9ab15204a79876cb98305cba53..467c8f73bb0dbef3156a4a42684c0155b54c9831 100644
--- a/data/grid5000/sites/lille/clusters/chifflot/chifflot.json
+++ b/data/grid5000/sites/lille/clusters/chifflot/chifflot.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 31 Jul 2018 22:00:00 GMT",
+  "created_at": "Wed, 01 Aug 2018 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R740",
   "priority": "201870",
diff --git a/data/grid5000/sites/luxembourg/clusters/granduc/granduc.json b/data/grid5000/sites/luxembourg/clusters/granduc/granduc.json
index 4f9f5b66b1e7f67c2935887df26678ef54167941..7e07025f246c61bb6a931fc51ad651873715770a 100644
--- a/data/grid5000/sites/luxembourg/clusters/granduc/granduc.json
+++ b/data/grid5000/sites/luxembourg/clusters/granduc/granduc.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Wed, 30 Nov 2011 23:00:00 GMT",
+  "created_at": "Thu, 01 Dec 2011 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge 1950",
   "queues": [
diff --git a/data/grid5000/sites/luxembourg/clusters/petitprince/petitprince.json b/data/grid5000/sites/luxembourg/clusters/petitprince/petitprince.json
index 41c96b6cb307905d6a141bd6a88e4732e6170af8..8a60605b6e19bfd26a4b6d847679efc553e81ab3 100644
--- a/data/grid5000/sites/luxembourg/clusters/petitprince/petitprince.json
+++ b/data/grid5000/sites/luxembourg/clusters/petitprince/petitprince.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 09 Sep 2013 22:00:00 GMT",
+  "created_at": "Tue, 10 Sep 2013 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge M620",
   "queues": [
diff --git a/data/grid5000/sites/lyon/clusters/hercule/hercule.json b/data/grid5000/sites/lyon/clusters/hercule/hercule.json
index 74454b161df6221e3c7fa4468c316a441c87e5b5..33b1ffcc5ca47c95afe013ac84aa2e5ef8809d86 100644
--- a/data/grid5000/sites/lyon/clusters/hercule/hercule.json
+++ b/data/grid5000/sites/lyon/clusters/hercule/hercule.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 02 Oct 2012 12:00:00 GMT",
+  "created_at": "Tue, 02 Oct 2012 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6220",
   "priority": "201299",
diff --git a/data/grid5000/sites/lyon/clusters/nova/nova.json b/data/grid5000/sites/lyon/clusters/nova/nova.json
index ea9783c9976825d634c915e76542fcb1174ac7b6..f0cd7e24062b7069f6f0acd36786b273027fe2fa 100644
--- a/data/grid5000/sites/lyon/clusters/nova/nova.json
+++ b/data/grid5000/sites/lyon/clusters/nova/nova.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Thu, 01 Dec 2016 12:00:00 GMT",
+  "created_at": "Thu, 01 Dec 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R430",
   "priority": "201700",
diff --git a/data/grid5000/sites/lyon/clusters/orion/orion.json b/data/grid5000/sites/lyon/clusters/orion/orion.json
index 894183c6138e48a925fdff7b1830ca62550397c6..571b1ed31e8f2362006f758cb86c0fb19c2d4747 100644
--- a/data/grid5000/sites/lyon/clusters/orion/orion.json
+++ b/data/grid5000/sites/lyon/clusters/orion/orion.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Fri, 14 Sep 2012 12:00:00 GMT",
+  "created_at": "Fri, 14 Sep 2012 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R720",
   "priority": "201299",
diff --git a/data/grid5000/sites/lyon/clusters/sagittaire/sagittaire.json b/data/grid5000/sites/lyon/clusters/sagittaire/sagittaire.json
index fa088ddf4d01be9fbd29d4e9b935a816629fc82a..dea4f1d88449d286fbc4ae194e674dd95b6997b5 100644
--- a/data/grid5000/sites/lyon/clusters/sagittaire/sagittaire.json
+++ b/data/grid5000/sites/lyon/clusters/sagittaire/sagittaire.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sat, 01 Jul 2006 12:00:00 GMT",
+  "created_at": "Sat, 01 Jul 2006 00:00:00 GMT",
   "kavlan": true,
   "model": "Sun Fire V20z",
   "priority": "201899",
diff --git a/data/grid5000/sites/lyon/clusters/taurus/taurus.json b/data/grid5000/sites/lyon/clusters/taurus/taurus.json
index 0d159f7f56454adb8c38cda80be40ffe4436e2d0..5df6f21b7517c253a6fc4e008cd7ea6954a59fc2 100644
--- a/data/grid5000/sites/lyon/clusters/taurus/taurus.json
+++ b/data/grid5000/sites/lyon/clusters/taurus/taurus.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Fri, 14 Sep 2012 12:00:00 GMT",
+  "created_at": "Fri, 14 Sep 2012 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R720",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/graffiti/graffiti.json b/data/grid5000/sites/nancy/clusters/graffiti/graffiti.json
index 6953ec4a18fa82430017163385e055857bbed986..2e71409660f4a95efd90d95cc23ca44b6ab7d05b 100644
--- a/data/grid5000/sites/nancy/clusters/graffiti/graffiti.json
+++ b/data/grid5000/sites/nancy/clusters/graffiti/graffiti.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Thu, 06 Jun 2019 22:00:00 GMT",
+  "created_at": "Fri, 07 Jun 2019 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge T640",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/graoully/graoully.json b/data/grid5000/sites/nancy/clusters/graoully/graoully.json
index 5f4fbcfc36544661c3360565d221cee4ca8f8764..4eef9c604bfe4a37459f7f32c0ce288e3243692c 100644
--- a/data/grid5000/sites/nancy/clusters/graoully/graoully.json
+++ b/data/grid5000/sites/nancy/clusters/graoully/graoully.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sun, 03 Jan 2016 23:00:00 GMT",
+  "created_at": "Mon, 04 Jan 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/graphique/graphique.json b/data/grid5000/sites/nancy/clusters/graphique/graphique.json
index 2bc1c0a5972eee2ebd6ed7b0b2f37b7f8bc76a8e..d09ecbf265eb5d6b545de512c81de955005766d6 100644
--- a/data/grid5000/sites/nancy/clusters/graphique/graphique.json
+++ b/data/grid5000/sites/nancy/clusters/graphique/graphique.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 12 May 2015 17:00:00 GMT",
+  "created_at": "Tue, 12 May 2015 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R720",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/graphite/graphite.json b/data/grid5000/sites/nancy/clusters/graphite/graphite.json
index 77d81ff1417083ffa84f2a4bef26999802e9a383..bdcc489b919406214d173962ef9ec6c2ba488de1 100644
--- a/data/grid5000/sites/nancy/clusters/graphite/graphite.json
+++ b/data/grid5000/sites/nancy/clusters/graphite/graphite.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Wed, 04 Dec 2013 23:00:00 GMT",
+  "created_at": "Thu, 05 Dec 2013 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R720",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/grcinq/grcinq.json b/data/grid5000/sites/nancy/clusters/grcinq/grcinq.json
index 93a733be376a03081d84898331fff9a24514c261..b6c45dd2dd58aab70c654cb0abd422520961b2ee 100644
--- a/data/grid5000/sites/nancy/clusters/grcinq/grcinq.json
+++ b/data/grid5000/sites/nancy/clusters/grcinq/grcinq.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 08 Apr 2013 22:00:00 GMT",
+  "created_at": "Tue, 09 Apr 2013 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6220",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/grele/grele.json b/data/grid5000/sites/nancy/clusters/grele/grele.json
index 07e1ccf27265f5f0f4d7ad7b25b435c838d19158..5efad864db9bd026c9d4ad902284f8ea9e90640e 100644
--- a/data/grid5000/sites/nancy/clusters/grele/grele.json
+++ b/data/grid5000/sites/nancy/clusters/grele/grele.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sun, 25 Jun 2017 22:00:00 GMT",
+  "created_at": "Mon, 26 Jun 2017 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R730",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/grimani/grimani.json b/data/grid5000/sites/nancy/clusters/grimani/grimani.json
index 9a6126904e4648c8770261985051cad521772240..3453c69b69f2e4c3c070d40f904b96fa5dbfd7a8 100644
--- a/data/grid5000/sites/nancy/clusters/grimani/grimani.json
+++ b/data/grid5000/sites/nancy/clusters/grimani/grimani.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 29 Aug 2016 22:00:00 GMT",
+  "created_at": "Tue, 30 Aug 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R730",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/grimoire/grimoire.json b/data/grid5000/sites/nancy/clusters/grimoire/grimoire.json
index 08010046c24d19d21d6df9bfc9c46c881bc37cd9..a1038a60dda2d635cea1a0d16ffc170bfb65e6b1 100644
--- a/data/grid5000/sites/nancy/clusters/grimoire/grimoire.json
+++ b/data/grid5000/sites/nancy/clusters/grimoire/grimoire.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Thu, 21 Jan 2016 23:00:00 GMT",
+  "created_at": "Fri, 22 Jan 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "queues": [
diff --git a/data/grid5000/sites/nancy/clusters/grisou/grisou.json b/data/grid5000/sites/nancy/clusters/grisou/grisou.json
index a1b3d1c7135d7b5a9a6ae8ae81e824d4e0274c77..c9b008ca89283a913b85bbea276efe39b28ff4ef 100644
--- a/data/grid5000/sites/nancy/clusters/grisou/grisou.json
+++ b/data/grid5000/sites/nancy/clusters/grisou/grisou.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sun, 03 Jan 2016 23:00:00 GMT",
+  "created_at": "Mon, 04 Jan 2016 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "priority": "201201",
diff --git a/data/grid5000/sites/nancy/clusters/grvingt/grvingt.json b/data/grid5000/sites/nancy/clusters/grvingt/grvingt.json
index 7df195eb10be1037ea63a5b83c7d27fbee380021..296db226fe8777e7829add8a894d931d532cf936 100644
--- a/data/grid5000/sites/nancy/clusters/grvingt/grvingt.json
+++ b/data/grid5000/sites/nancy/clusters/grvingt/grvingt.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 10 Apr 2018 22:00:00 GMT",
+  "created_at": "Wed, 11 Apr 2018 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6420",
   "queues": [
diff --git a/data/grid5000/sites/nantes/clusters/econome/econome.json b/data/grid5000/sites/nantes/clusters/econome/econome.json
index 76f58b4e26e7195a872adb4c69e83b0a32894b39..0e50c521aef0be6a3419f6ca1fcbf1913c1fa0cb 100644
--- a/data/grid5000/sites/nantes/clusters/econome/econome.json
+++ b/data/grid5000/sites/nantes/clusters/econome/econome.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 15 Apr 2014 22:00:00 GMT",
+  "created_at": "Wed, 16 Apr 2014 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6220",
   "queues": [
diff --git a/data/grid5000/sites/nantes/clusters/ecotype/ecotype.json b/data/grid5000/sites/nantes/clusters/ecotype/ecotype.json
index 3906407eb10a82e9d306ff232a04ecf94c6438e4..b234310cba13c8a7c08c513f933a08d80dedfc0a 100644
--- a/data/grid5000/sites/nantes/clusters/ecotype/ecotype.json
+++ b/data/grid5000/sites/nantes/clusters/ecotype/ecotype.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 16 Oct 2017 17:40:55 GMT",
+  "created_at": "Mon, 16 Oct 2017 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "queues": [
diff --git a/data/grid5000/sites/rennes/clusters/paranoia/paranoia.json b/data/grid5000/sites/rennes/clusters/paranoia/paranoia.json
index 8a7fc86552af13b32b310db04c605abdbafcf6e8..86fc39a0bcbb22e7ed14dc1683c6a00e1c41982e 100644
--- a/data/grid5000/sites/rennes/clusters/paranoia/paranoia.json
+++ b/data/grid5000/sites/rennes/clusters/paranoia/paranoia.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Thu, 20 Feb 2014 23:00:00 GMT",
+  "created_at": "Fri, 21 Feb 2014 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6220 II",
   "queues": [
diff --git a/data/grid5000/sites/rennes/clusters/parapide/parapide.json b/data/grid5000/sites/rennes/clusters/parapide/parapide.json
index a45f694c111716c4959e10c13c6e65b4c8d10a1e..f3051e3a4b1085dadf6ca5719125c53ecc7f7419 100644
--- a/data/grid5000/sites/rennes/clusters/parapide/parapide.json
+++ b/data/grid5000/sites/rennes/clusters/parapide/parapide.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Sun, 24 Jan 2010 23:00:00 GMT",
+  "created_at": "Mon, 25 Jan 2010 00:00:00 GMT",
   "kavlan": true,
   "model": "SUN FIRE X2270",
   "queues": [
diff --git a/data/grid5000/sites/rennes/clusters/parapluie/parapluie.json b/data/grid5000/sites/rennes/clusters/parapluie/parapluie.json
index d9d25a22579c02a1363bd10c9b25e9bfe485923f..0f3f67c56d75f366520e4125a5027474a5565563 100644
--- a/data/grid5000/sites/rennes/clusters/parapluie/parapluie.json
+++ b/data/grid5000/sites/rennes/clusters/parapluie/parapluie.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 01 Nov 2010 23:00:00 GMT",
+  "created_at": "Tue, 02 Nov 2010 00:00:00 GMT",
   "kavlan": true,
   "model": "HP ProLiant DL165 G7",
   "queues": [
diff --git a/data/grid5000/sites/rennes/clusters/parasilo/parasilo.json b/data/grid5000/sites/rennes/clusters/parasilo/parasilo.json
index 7d687d4ae4d9fc00046282925cf5d6374831dee8..a35afb80d325f6282fe26360de20caa704c3e92e 100644
--- a/data/grid5000/sites/rennes/clusters/parasilo/parasilo.json
+++ b/data/grid5000/sites/rennes/clusters/parasilo/parasilo.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 12 Jan 2015 23:00:00 GMT",
+  "created_at": "Tue, 13 Jan 2015 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "queues": [
diff --git a/data/grid5000/sites/rennes/clusters/paravance/paravance.json b/data/grid5000/sites/rennes/clusters/paravance/paravance.json
index 11d636f9dcd931ec46c85defd9fe65658d358050..67e1a6f033e457d42e2c31d9ebd58c7130bac1b7 100644
--- a/data/grid5000/sites/rennes/clusters/paravance/paravance.json
+++ b/data/grid5000/sites/rennes/clusters/paravance/paravance.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 12 Jan 2015 23:00:00 GMT",
+  "created_at": "Tue, 13 Jan 2015 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R630",
   "queues": [
diff --git a/data/grid5000/sites/sophia/clusters/suno/suno.json b/data/grid5000/sites/sophia/clusters/suno/suno.json
index f6d9a652fd9a2553ff3dcad74975af90fa878451..30c142b75ecf9393ac40303bd6b83396cb280aa3 100644
--- a/data/grid5000/sites/sophia/clusters/suno/suno.json
+++ b/data/grid5000/sites/sophia/clusters/suno/suno.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Tue, 26 Jan 2010 23:00:00 GMT",
+  "created_at": "Wed, 27 Jan 2010 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge R410",
   "queues": [
diff --git a/data/grid5000/sites/sophia/clusters/uvb/uvb.json b/data/grid5000/sites/sophia/clusters/uvb/uvb.json
index c1b9464e63d2cdedeb8e7df43b558513605f2b0c..98b5680969d86f956472ac0c7c5c3ba0290ad318 100644
--- a/data/grid5000/sites/sophia/clusters/uvb/uvb.json
+++ b/data/grid5000/sites/sophia/clusters/uvb/uvb.json
@@ -1,5 +1,5 @@
 {
-  "created_at": "Mon, 03 Jan 2011 23:00:00 GMT",
+  "created_at": "Tue, 04 Jan 2011 00:00:00 GMT",
   "kavlan": true,
   "model": "Dell PowerEdge C6100",
   "queues": [
diff --git a/lib/refrepo.rb b/lib/refrepo.rb
index a23ed171f8a4063cc5304ebae7a0140da6771130..f7137b490be3fa9ff2a2e5321688eb31fb15c8b9 100644
--- a/lib/refrepo.rb
+++ b/lib/refrepo.rb
@@ -25,3 +25,4 @@ end
 # load sub-parts that are used by many scripts anyway
 require 'refrepo/utils'
 require 'refrepo/input_loader'
+require 'refrepo/data_loader'
diff --git a/lib/refrepo/data_loader.rb b/lib/refrepo/data_loader.rb
index 9564dc5e2dc50ec108d521f391e8ab93e3903101..ed19e5ea02463b4eaf45e4ce9f909b35e995d09b 100644
--- a/lib/refrepo/data_loader.rb
+++ b/lib/refrepo/data_loader.rb
@@ -22,7 +22,7 @@ def load_data_hierarchy
     path_hierarchy = File.dirname(filename).split('/') # Split the file path (path relative to input/)
     path_hierarchy = [] if path_hierarchy == ['.']
 
-    if ['nodes', 'network_equipments'].include?(path_hierarchy.last)
+    if ['nodes', 'network_equipments', 'servers', 'pdus'].include?(path_hierarchy.last)
       # it's a node or a network_equipment, add the uid
       path_hierarchy << file_hash['uid']
     end
diff --git a/lib/refrepo/gen/puppet/bindg5k.rb b/lib/refrepo/gen/puppet/bindg5k.rb
index 3ded86a4bf400659b05ad7d0bd4984d758226240..79fc84f7d73ef9b5084ffcbe23f33cbb4ecdadb7 100644
--- a/lib/refrepo/gen/puppet/bindg5k.rb
+++ b/lib/refrepo/gen/puppet/bindg5k.rb
@@ -145,7 +145,7 @@ def get_networks_records(site, key)
       next
     end
 
-    eth_net_uid = node['network_adapters'].select{ |u, h| h['mounted'] && /^eth[0-9]$/.match(u) } # eth* interfaces
+    eth_net_uid = node['network_adapters'].select{ |u, h| h['mounted'] && /^eth[0-9]$/.match(u) } # eth* interfaces
     node['network_adapters'].each { |net_uid, net_hash|
       if ! eth_net_uid.include?(net_uid) && node['network_adapters'].size > 1
         hostsuffix = "-#{net_uid}"
@@ -164,7 +164,7 @@ end
 
 def get_node_records(cluster_uid, node_uid, network_adapters)
   records = []
-  
+
   network_adapters.each { |net_uid, net_hash|
     next unless net_hash['ip']
 
@@ -415,7 +415,7 @@ def generate_puppet_bindg5k(options)
 
   $written_files = []
 
-  refapi = load_yaml_file_hierarchy
+  refapi = load_data_hierarchy
 
   # Loop over Grid'5000 sites
   refapi["sites"].each { |site_uid, site|
@@ -445,7 +445,7 @@ def generate_puppet_bindg5k(options)
     site_records['pdus'] = get_pdus_records(site) unless site['pdus'].nil?
 
     # Networks and laptops (same input format)
-    site_records['networks'] = get_networks_records(site, 'networks') unless site['networks'].nil?
+    site_records['networks'] = get_networks_records(site, 'network_equipments') unless site['network_equipments'].nil?
     site_records['laptops'] = get_networks_records(site, 'laptops') unless site['laptops'].nil?
 
     site.fetch("clusters", []).sort.each { |cluster_uid, cluster|
@@ -457,8 +457,8 @@ def generate_puppet_bindg5k(options)
       network_adapters = {}
 
       # Nodes
-      node.fetch('network_adapters').each { |net_uid, net_hash|
-        network_adapters[net_uid] = {"ip" => net_hash["ip"], "mounted" => net_hash["mounted"], "alias" => net_hash["alias"]}
+      node.fetch('network_adapters').each { |net|
+        network_adapters[net['device']] = {"ip" => net["ip"], "mounted" => net["mounted"], 'alias' => net['alias']}
       }
 
       # Mic
@@ -474,9 +474,15 @@ def generate_puppet_bindg5k(options)
       kavlan_adapters = {}
       node.fetch('kavlan').each { |net_uid, net_hash|
         net_hash.each { |kavlan_net_uid, ip|
-          kavlan_adapters["#{net_uid}-#{kavlan_net_uid}"] = {"ip" => ip, "mounted" => node['network_adapters'][net_uid]['mounted']}
+          kavlan_adapters["#{net_uid}-#{kavlan_net_uid}"] = {
+            'ip' => ip,
+            'mounted' => node['network_adapters'].select { |n|
+              n['device'] == net_uid
+            }[0]['mounted']
+          }
         }
       }
 
+      site_records["#{cluster_uid}-kavlan"] ||= []
       site_records["#{cluster_uid}-kavlan"] += get_node_kavlan_records(cluster_uid, node_uid, network_adapters, kavlan_adapters)
     end
diff --git a/lib/refrepo/gen/puppet/conmang5k.rb b/lib/refrepo/gen/puppet/conmang5k.rb
index 13b1eb721ac3e42636c04595cfb1d7c0518ac656..0b8eb0b11e991cf29378d4bc3c5ef2c989fc5c97 100644
--- a/lib/refrepo/gen/puppet/conmang5k.rb
+++ b/lib/refrepo/gen/puppet/conmang5k.rb
@@ -23,7 +23,7 @@ def generate_puppet_conmang5k(options)
   puts "Using configuration directory: #{options[:conf_dir]}"
   puts "For site(s): #{options[:sites].join(', ')}"
 
-  refapi = load_yaml_file_hierarchy
+  refapi = load_data_hierarchy
 
   config = YAML::load_file(options[:conf_dir] + 'console.yaml')
   credentials = YAML::load_file(options[:conf_dir] + 'console-password.yaml')
diff --git a/lib/refrepo/gen/puppet/dhcpg5k.rb b/lib/refrepo/gen/puppet/dhcpg5k.rb
index 69733059c89747cf2415f773515a3e31dfa381ec..be0821cf59a905736beaa2d060b063bae2b29a63 100644
--- a/lib/refrepo/gen/puppet/dhcpg5k.rb
+++ b/lib/refrepo/gen/puppet/dhcpg5k.rb
@@ -6,19 +6,22 @@ def get_network_info(node_hash, network_interface)
   # For the production network, find the mounted interface (either eth0 or eth1)
   neti = network_interface
   if neti == "eth" then
-    (0..5).each {|i|
-      if node_network_adapters.fetch("eth#{i}").fetch("mounted")
-        neti = "eth#{i}"
-        break
-      end
-    }
-    raise 'none of the eth[0-4] interfaces have the property "mounted" set to "true"' if neti == 'eth'
+    neti = node_network_adapters.select { |i| i['device'] =~ /eth/ and i['mounted'] }[0]['device']
+    unless neti
+      raise 'none of the eth[0-4] interfaces have the property "mounted" set to "true"' if neti == 'eth'
+    end
   end
 
-  node_network_interface = node_network_adapters.fetch(neti)
+  node_network_interface = nil
+  case node_network_adapters
+  when Array
+    node_network_interface = node_network_adapters.select { |i| i['device'] == neti }[0]
+  when Hash
+    node_network_interface = node_network_adapters[neti]
+  end
 
-  node_mac = node_network_interface.fetch("mac")
-  node_ip = node_network_interface.fetch("ip")
+  node_mac = node_network_interface.fetch('mac')
+  node_ip = node_network_interface.fetch('ip')
 
   raise '"mac" is nil' unless node_mac
   raise '"ip" is nil' unless node_ip
@@ -39,7 +42,7 @@ def write_dhcp_file(data, options)
 end
 
 def generate_puppet_dhcpg5k(options)
-  global_hash = load_yaml_file_hierarchy
+  global_hash = load_data_hierarchy
 
   puts "Writing DHCP configuration files to: #{options[:output_dir]}"
 
@@ -62,10 +65,12 @@ def generate_puppet_dhcpg5k(options)
       next if node_hash == nil || node_hash['status'] == 'retired'
 
       if node_hash['mic'] && node_hash['mic']['ip'] && node_hash['mic']['mac']
-        node_hash['network_adapters'] ||= {}
-        node_hash['network_adapters']['mic0'] ||= {}
-        node_hash['network_adapters']['mic0']['ip'] = node_hash['mic'].delete('ip')
-        node_hash['network_adapters']['mic0']['mac'] = node_hash['mic'].delete('mac')
+        node_hash['network_adapters'] ||= []
+        node_hash['network_adapters'].push({
+          'device' => 'mic0',
+          'ip'=> node_hash['mic'].delete('ip'),
+          'mac' => node_hash['mic'].delete('mac')
+        })
       end
     }
   }
@@ -74,7 +79,6 @@ def generate_puppet_dhcpg5k(options)
     site_hash.fetch("clusters").each { |cluster_uid, cluster_hash|
       # networks = ["eth", "bmc"]
       # networks << 'mic0' if cluster_hash['nodes'].values.any? {|x| x['network_adapters']['mic0'] }
-
       write_dhcp_file({
                         "filename" => "cluster-" + cluster_uid + ".conf",
                         "site_uid" => site_uid,
@@ -84,9 +88,6 @@ def generate_puppet_dhcpg5k(options)
                       }, options)
     }
 
-    #
-    #
-    #
     # Other dhcp files
     ["networks", "laptops", "servers"].each { |key|
 
diff --git a/lib/refrepo/gen/puppet/kadeployg5k.rb b/lib/refrepo/gen/puppet/kadeployg5k.rb
index fe5b372ab52329a7046d3a3000f5b982f0b764f0..edb7b7fe31b31b148eef9022bb77f645881e9b34 100644
--- a/lib/refrepo/gen/puppet/kadeployg5k.rb
+++ b/lib/refrepo/gen/puppet/kadeployg5k.rb
@@ -26,16 +26,14 @@ end
 
 # Extract the node ip from the node hash
 def get_ip(node)
-  node['network_adapters'].each { |device, network_adapter|
-    if network_adapter['mounted'] && /^eth[0-9]$/.match(device)
-      return network_adapter['ip']
-    end
-  }
+  return node['network_adapters'].select { |n|
+    n['mounted'] && n['device'] =~ /eth/
+  }[0]['ip']
 end
 
 def generate_puppet_kadeployg5k(options)
-  global_hash = load_yaml_file_hierarchy
+  global_hash = load_data_hierarchy
 
   if not options[:conf_dir]
     options[:conf_dir] = "#{options[:output_dir]}/platforms/production/generators/kadeploy"
diff --git a/lib/refrepo/gen/puppet/kavlang5k.rb b/lib/refrepo/gen/puppet/kavlang5k.rb
index 8c2761ec31d2063a36ace613e41144c6158d538a..673a5df4443b7c3031c7915e05858a1a680d39b9 100644
--- a/lib/refrepo/gen/puppet/kavlang5k.rb
+++ b/lib/refrepo/gen/puppet/kavlang5k.rb
@@ -1,5 +1,29 @@
 require 'refrepo/hash/hash'
 
+def kavlan_switch_port_lookup(switch, node_uid, interface='')
+  switch["linecards"].each_with_index do |lc, lc_uid|
+    next if not lc["ports"]
+    lc["ports"].each_with_index do |port, port_uid|
+      if port.is_a?(Hash)
+        switch_remote_port = port["port"] || lc["port"] || ""
+        switch_remote_uid = port["uid"]
+      else
+        switch_remote_port = lc["port"] || ""
+        switch_remote_uid = port
+      end
+      #warn "#{node_uid}, #{switch_uid}, #{lc_uid}, #{port_uid}, #{switch_remote_uid}, #{switch_remote_port}, #{interface}"
+      if switch_remote_uid == node_uid and switch_remote_port == interface
+        # Build port name from snmp_naming_pattern
+        # Example: '3 2 GigabitEthernet%LINECARD%/%PORT%' -> 'GigabitEthernet3/2'
+        pattern = lc.has_key?("kavlan_pattern") ? lc["kavlan_pattern"] : lc["snmp_pattern"]
+        port_name = pattern.sub("%LINECARD%",lc_uid.to_s).sub("%PORT%",port_uid.to_s)
+        return port_name
+      end
+    end
+  end
+  return nil
+end
+
 def generate_puppet_kavlang5k(options)
 
   if not options[:conf_dir]
@@ -12,7 +36,7 @@ def generate_puppet_kavlang5k(options)
   puts "Using configuration directory: #{options[:conf_dir]}"
   puts "For site(s): #{options[:sites].join(', ')}"
 
-  refapi = load_yaml_file_hierarchy
+  refapi = load_data_hierarchy
 
   refapi['sites'].each { |site_uid, site_refapi|
 
@@ -41,7 +65,8 @@ def generate_puppet_kavlang5k(options)
     end
 
     # Look for site's global kavlan
-    kavlan_id = refapi['sites'][site_uid]['kavlans'].each_key.select {|k| k.is_a?(Numeric) and k>9}.pop()
+    # TODO fix dirty convertion to_i below
+    kavlan_id = refapi['sites'][site_uid]['kavlans'].each_key.select {|k| k.to_i > 9}.pop()
     output = ERB.new(File.read(File.expand_path('templates/kavlan-dhcp.conf.erb', File.dirname(__FILE__))), nil, '-').result(binding)
     output_file = Pathname("#{options[:output_dir]}//platforms/production/modules/generated/files/grid5000/kavlan/#{site_uid}/dhcp/dhcpd-0.conf")
     File.write(output_file, output)
diff --git a/lib/refrepo/gen/puppet/lanpowerg5k.rb b/lib/refrepo/gen/puppet/lanpowerg5k.rb
index 1cbc443516615718a187c9f672722277e7c05fdd..7d8132b379625e0d29a2746dcd19e1487f210a03 100644
--- a/lib/refrepo/gen/puppet/lanpowerg5k.rb
+++ b/lib/refrepo/gen/puppet/lanpowerg5k.rb
@@ -16,7 +16,7 @@ def generate_puppet_lanpowerg5k(options)
   config = YAML::load_file("#{options[:conf_dir]}/console.yaml")
   credentials = YAML::load_file("#{options[:conf_dir]}/console-password.yaml")
 
-  refapi = load_yaml_file_hierarchy
+  refapi = load_data_hierarchy
 
   refapi['sites'].each { |site_uid, site_refapi|
 
diff --git a/lib/refrepo/gen/puppet/templates/dhcp.erb b/lib/refrepo/gen/puppet/templates/dhcp.erb
index ed8e14d02cbca91bf92bb4205c66234341d83c28..853ef49c6ba6e0554a628135ce81a87201a1feca 100644
--- a/lib/refrepo/gen/puppet/templates/dhcp.erb
+++ b/lib/refrepo/gen/puppet/templates/dhcp.erb
@@ -8,7 +8,7 @@ data.fetch('network_adapters').each { |network_interface|
 
   dhcp_entries = []
 
-  data.fetch('nodes').each_sort_by_node_uid { |node_uid, node| 
+  data.fetch('nodes').each_sort_by_node_uid { |node_uid, node|
     next if node == nil || (node['status'] && node['status'] == 'retired')
 
     case network_interface
@@ -30,7 +30,6 @@
       next
     end
   }
-
   if not dhcp_entries.empty?
 %>
 group {
diff --git a/lib/refrepo/gen/puppet/templates/kavlan-cluster.conf.erb b/lib/refrepo/gen/puppet/templates/kavlan-cluster.conf.erb
index f588dfe297be1179d0e0f33e77f50e2a5d9cd0ee..66cf295bfbf8fcc336af02a397aae5a99d18307e 100644
--- a/lib/refrepo/gen/puppet/templates/kavlan-cluster.conf.erb
+++ b/lib/refrepo/gen/puppet/templates/kavlan-cluster.conf.erb
@@ -3,32 +3,6 @@
 # GENERATED by kavlang5k.rb
 #
 <%
-def net_switch_port_lookup(site, node_uid, interface='')
-  site["networks"].each do |switch_uid, switch|
-    #pp switch_uid
-    switch["linecards"].each do |lc_uid,lc|
-      lc["ports"].each do |port_uid,port|
-        if port.is_a?(Hash)
-          switch_remote_port = port["port"] || lc["port"] || ""
-          switch_remote_uid = port["uid"]
-        else
-          switch_remote_port = lc["port"] || ""
-          switch_remote_uid = port
-        end
-        #warn "#{node_uid}, #{switch_uid}, #{lc_uid}, #{port_uid}, #{switch_remote_uid}, #{switch_remote_port}, #{interface}"
-        if switch_remote_uid == node_uid and switch_remote_port == interface
-          # Build port name from snmp_naming_pattern
-          # Example: '3 2 GigabitEthernet%LINECARD%/%PORT%' -> 'GigabitEthernet3/2'
-          pattern = lc.has_key?("kavlan_pattern") ? lc["kavlan_pattern"] : lc["snmp_pattern"]
-          port_name = pattern.sub("%LINECARD%",lc_uid.to_s).sub("%PORT%",port_uid.to_s)
-          return switch_uid, port_name
-        end
-      end
-    end
-  end
-  return nil
-end
-
 refapi['sites'][site_uid]['clusters'].sort.to_h.each do |cluster_uid, cluster|
 -%>
 <% if cluster['kavlan'] -%>
@@ -36,15 +10,17 @@ refapi['sites'][site_uid]['clusters'].sort.to_h.each do |cluster_uid, cluster|
 <%
   cluster['nodes'].sort.to_h.each do |node_uid, node|
     next if node['status'] and node['status'] == "retired"
-    node['network_adapters'].each do |interface_uid, interface|
-      interface_has_kavlan = interface.has_key?('kavlan') ? interface['kavlan'] : node['kavlan'].has_key?(interface_uid)
+    node['network_adapters'].each do |interface|
+      # puts interface.class
+      # pp interface
+      #nil.crash
+      interface_has_kavlan = interface['kavlan']
       if interface_has_kavlan
-        switch_uid, kavlan_port_name = net_switch_port_lookup(refapi['sites'][site_uid], node_uid, interface_uid)
-        if switch_uid and kavlan_port_name
+        switch_uid = interface['switch']
+        kavlan_port_name = kavlan_switch_port_lookup(refapi['sites'][site_uid]["network_equipments"][switch_uid], node_uid, interface['device'])
 -%>
-<%= node_uid %><%= interface['mounted']? "" : "-"+interface_uid %>.<%= site_uid %>.grid5000.fr <%= kavlan_port_name %> <%= switch_uid %>
+<%= node_uid %><%= interface['mounted']? "" : "-"+interface['device'] %>.<%= site_uid %>.grid5000.fr <%= kavlan_port_name %> <%= switch_uid %>
 <%
-        end
       end
     end
   end
diff --git a/lib/refrepo/gen/puppet/templates/kavlan-dhcp.conf.erb b/lib/refrepo/gen/puppet/templates/kavlan-dhcp.conf.erb
index 2afccec77b2954c60b1e99962f0e67c2ee7441c9..0881016c66b160a7e66f20ff0886581179526924 100644
--- a/lib/refrepo/gen/puppet/templates/kavlan-dhcp.conf.erb
+++ b/lib/refrepo/gen/puppet/templates/kavlan-dhcp.conf.erb
@@ -11,7 +11,7 @@ option pxelinux.reboottime code 211 = unsigned integer 32;
 option vendorinfo code 43 = string;
 
 <%
-kavlan = refapi['sites'][site_uid]['kavlans'][kavlan_id]
+kavlan = refapi['sites'][site_uid]['kavlans'][kavlan_id.to_s] # TODO fix needed conversion
 kavlan_ip = IPAddress::IPv4::new(kavlan['network'])
 -%>
 
@@ -32,21 +32,24 @@ refapi['sites'].sort.to_h.each_key do |site|
   refapi['sites'][site].fetch('clusters', []).sort.to_h.each do |cluster_uid, cluster|
     cluster['nodes'].sort.to_h.each do |node_uid, node|
       next if node['status'] and node['status'] == 'retired'
-      node['network_adapters'].each do |interface_uid, interface|
-        if (interface['mountable'] or site == "rennes") and node['kavlan'] and node['kavlan'].has_key?(interface_uid) # TODO: use interface['kavlan']
-          if not interface['mac'] or not node['kavlan'].has_key?(interface_uid) or not node['kavlan'][interface_uid]["kavlan-#{kavlan_id}"]
-            warn "WARN: Cannot fill dhcpd entry for #{node_uid}, interface #{interface_uid} in vlan #{kavlan_id}: " \
-              "Missing mac (#{interface['mac']}) or IP (#{node['kavlan'][interface_uid] and node['kavlan'][interface_uid]["kavlan-#{kavlan_id}"]})"
-            next
-          end
+      node['network_adapters'].each do |interface|
+        if interface['mountable'] and
+           interface['kavlan']
+
+          if not interface['mac'] or
+             not node['kavlan'][interface['device']]["kavlan-#{kavlan_id}"]
+            warn "WARN: Cannot fill dhcpd entry for #{node_uid}, interface #{interface_uid} in vlan #{kavlan_id}: " \
+              "Missing mac (#{interface['mac']}) or IP (#{node['kavlan'][interface['device']] and node['kavlan'][interface['device']]["kavlan-#{kavlan_id}"]})"
+            next
+          end
 -%>
-  host <%= node_uid %><%= interface['mounted']? "" : "-"+interface_uid %>-kavlan-<%= kavlan_id %>.<%= site %>.grid5000.fr {
+  host <%= node_uid %><%= interface['mounted']? "" : "-"+interface['device'] %>-kavlan-<%= kavlan_id %>.<%= site %>.grid5000.fr {
     hardware ethernet <%= interface['mac'].downcase() %>;
-    option host-name "<%= node_uid %><%= interface['mounted']? "" : "-"+interface_uid %>-kavlan-<%= kavlan_id %>";
+    option host-name "<%= node_uid %><%= interface['mounted']? "" : "-"+interface['device'] %>-kavlan-<%= kavlan_id %>";
     option domain-name "<%= site %>.grid5000.fr";
     option domain-search "<%= site %>.grid5000.fr", "grid5000.fr";
-    fixed-address <%= node['kavlan'][interface_uid]["kavlan-#{kavlan_id}"] %>;
-<% if kavlan_id > 9 -%>
+    fixed-address <%= node['kavlan'][interface['device']]["kavlan-#{kavlan_id}"] %>;
+<% if kavlan_id.to_i > 9 -%>
     next-server kadeploy.<%= site %>.grid5000.fr;
 <% end -%>
   }
diff --git a/lib/refrepo/gen/puppet/templates/kavlan.conf.erb b/lib/refrepo/gen/puppet/templates/kavlan.conf.erb
index e4372bbf14881a1b0255abe6513c6a3b352218f6..36bf1c447bbbd5de01985de318e55e578a6bde2c 100644
--- a/lib/refrepo/gen/puppet/templates/kavlan.conf.erb
+++ b/lib/refrepo/gen/puppet/templates/kavlan.conf.erb
@@ -21,7 +21,7 @@
     "vlan": [
       {"value": 100, "name": "PRODUCTION", "type": "NULL"},
 <%
-  site_refapi['kavlans'].each do |kavlan_id, kavlan|
+  site_refapi['kavlans'].sort_by{ |k,v| k.to_i }.each do |kavlan_id, kavlan|
     if kavlan_id.to_i.between?(1, 3)
 -%>
       {"value": 70<%= kavlan_id %>, "name": "KAVLAN-<%= kavlan_id %>", "type": "kavlan-local", "dhcpd": "ssh kavlan-<%= kavlan_id %> sudo /etc/init.d/isc-dhcp-server"},
@@ -32,15 +32,15 @@
 <%
     end
   end
-
-  (10..21).each.reject{ |kavlan_id| site_refapi['kavlans'].include?(kavlan_id) }.each do |kavlan_id|
+  # TODO Fix dirty conversion
+  (10..21).each.reject{ |kavlan_id| site_refapi['kavlans'].keys.map(&:to_i).include?(kavlan_id) }.each do |kavlan_id|
 -%>
       {"value": 7<%= kavlan_id %>, "name": "KAVLAN-<%= kavlan_id %>", "type": "kavlan-remote"},
 <%
   end
 
-  if site_refapi.has_key?('kavlan_topo')
-    Range.new(*site_refapi['kavlan_topo'].split("..").map{|d| Integer(d)}).each do |kavlan_topo_id| -%>
+  if site_refapi['kavlans'].has_key?('topo')
+    Range.new(*site_refapi['kavlans']['topo'].split("..").map{|d| Integer(d)}).each do |kavlan_topo_id| -%>
       {"value": <%= kavlan_topo_id %>, "name": "KAVLAN-<%= kavlan_topo_id %>", "type": "kavlan-topo"},
 <%
   end
diff --git a/lib/refrepo/gen/reference-api.rb b/lib/refrepo/gen/reference-api.rb
index a9c1c5afa24e1146e57e1a85733503ec8e6342fa..e2fc4237c51a1996eb15912461f0d4ca6516a9fd 100644
--- a/lib/refrepo/gen/reference-api.rb
+++ b/lib/refrepo/gen/reference-api.rb
@@ -95,7 +95,7 @@ def generate_reference_api
   grid_path = Pathname.new(refapi_path)
   grid_path.mkpath()
 
-  write_json(grid_path.join("#{global_hash['uid']}.json"), 
+  write_json(grid_path.join("#{global_hash['uid']}.json"),
              global_hash.reject {|k, v| k == "sites" || k == "network_equipments" || k == "disk_vendor_model_mapping"})
 
 end
@@ -128,7 +128,7 @@ def generate_reference_api
     site_path = Pathname.new(refapi_path).join("sites", site_uid)
     site_path.mkpath()
 
-    write_json(site_path.join("#{site_uid}.json"), 
+    write_json(site_path.join("#{site_uid}.json"),
               site.reject {|k, v| k == "clusters" || k == "networks" || k == "pdus" || k == "dom0" || k == "laptops" || k == "servers" })
 
     #
@@ -189,7 +189,7 @@ def generate_reference_api
 
       # On the previous version of this script, cluster["created_ad"] was generated from a Ruby Time. cluster["created_ad"] is now a Ruby Date at JSON import.
      # As Date.httpdate and Time.httpdate does not behave the same with timezone, it is converted here as a Ruby time.
-      cluster["created_at"] = Time.parse(cluster["created_at"].to_s).httpdate
+      cluster["created_at"] = Date.parse(cluster["created_at"].to_s).httpdate
 
      cluster_path = Pathname.new(refapi_path).join("sites", site_uid, "clusters", cluster_uid)
      cluster_path.mkpath()
@@ -224,7 +224,7 @@ def generate_reference_api
 
      # Delete keys
      #raise 'node["storage_devices"] is nil' if node["storage_devices"].nil?
-      Hash(node["storage_devices"]).keys.each { |key| 
+      Hash(node["storage_devices"]).keys.each { |key|
        node["storage_devices"][key].delete("timeread") if node["storage_devices"][key].key?("timeread")
        node["storage_devices"][key].delete("timewrite") if node["storage_devices"][key].key?("timewrite")
      }
diff --git a/lib/refrepo/gen/wiki/generators/cpu_parameters.rb b/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
index d0d7d0e15a9696c6cb4ede827ccadbe5735f6d06..013c1f7b9961bc2030bdd9a139cedea307cea8c5 100644
--- a/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
+++ b/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
@@ -28,7 +28,7 @@ class CPUParametersGenerator < WikiGenerator
 
         #One line per cluster
         table_data << [
-          DateTime.new(*cluster_hash["created_at"].to_s.scan(/\d+/).map {|i| i.to_i}).strftime("%Y-%m-%d"),
+          DateTime.parse(*cluster_hash["created_at"]).strftime("%Y-%m-%d"),
           site_uid,
           cluster_uid,
          cpu_family,
diff --git a/lib/refrepo/gen/wiki/generators/disk_reservation.rb b/lib/refrepo/gen/wiki/generators/disk_reservation.rb
index 5a5ebf775a5f6b53503f29f1f59bae50f1b6f239..b88db2dc21aed6c2b0a7857f2f64b6adb0dfa5a3 100644
--- a/lib/refrepo/gen/wiki/generators/disk_reservation.rb
+++ b/lib/refrepo/gen/wiki/generators/disk_reservation.rb
@@ -17,7 +17,7 @@ class DiskReservationGenerator < WikiGenerator
       disk_info = {}
       cluster_hash.fetch('nodes').sort.to_h.each { |node_uid, node_hash|
         next if node_hash['status'] == 'retired'
-        reservable_disks = node_hash['storage_devices'].select{ |k, v| v['reservation'] == true }.count
+        reservable_disks = node_hash['storage_devices'].select{ |v| v['reservation'] == true }.count
         add(disk_info, node_uid, reservable_disks)
       }
 
diff --git a/lib/refrepo/gen/wiki/generators/hardware.rb b/lib/refrepo/gen/wiki/generators/hardware.rb
index 0098d770accf7b5c9bfec4118822b904c927837f..23f7cc07cd3ca5273fe55e4cd2d380bdf5b464c3 100644
--- a/lib/refrepo/gen/wiki/generators/hardware.rb
+++ b/lib/refrepo/gen/wiki/generators/hardware.rb
@@ -42,10 +42,10 @@ class G5KHardwareGenerator < WikiGenerator
           if node_hash['gpu'] and node_hash['gpu']['gpu_count']
             gpus += node_hash['gpu']['gpu_count']
           end
-          ssds += node_hash['storage_devices'].values.select { |d| d['storage'] == 'SSD' }.length
-          hdds += node_hash['storage_devices'].values.select { |d| d['storage'] == 'HDD' }.length
-          node_hash['storage_devices'].each_pair do |k, e|
-            storage_space += e['size']
+          ssds += node_hash['storage_devices'].select { |d| d['storage'] == 'SSD' }.length
+          hdds += node_hash['storage_devices'].select { |d| d['storage'] == 'HDD' }.length
+          node_hash['storage_devices'].each do |i|
+            storage_space += i['size']
           end
         end
       end
@@ -108,12 +108,12 @@ class G5KHardwareGenerator < WikiGenerator
           data['ram_size'][key][site_uid] += 1
 
           # HPC Networks
-          interfaces = node_hash['network_adapters'].select{ |k, v|
+          interfaces = node_hash['network_adapters'].select{ |v|
             v['enabled'] and
             (v['mounted'] or v['mountable']) and
             not v['management'] and
-            (k =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
-          }.map{ |k, v|
+            (v['device'] =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
+          }.map{ |v|
             [
               {
                 text: v['interface'] + ' ' + G5K.get_rate(v['rate']),
@@ -129,12 +129,12 @@ class G5KHardwareGenerator < WikiGenerator
           }
 
           # NIC models
-          interfaces = node_hash['network_adapters'].select{ |k, v|
+          interfaces = node_hash['network_adapters'].select{ |v|
             v['enabled'] and
             (v['mounted'] or v['mountable']) and
             not v['management'] and
-            (k =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
-          }.map{ |k, v|
+            (v['device'] =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
+          }.map{ |v|
             t = (v['vendor'] || 'N/A') + ' ' + (v['model'] || 'N/A');
             [
               {
@@ -314,21 +314,21 @@ class G5KHardwareGenerator < WikiGenerator
       cluster_hash.fetch('nodes').sort.to_h.each do |node_uid, node_hash|
         next if node_hash['status'] == 'retired'
         sd = node_hash['storage_devices']
-        reservable_disks = sd.to_a.select{ |v| v[1]['reservation'] == true }.count > 0
-        maindisk = sd.to_a.select { |v| v[0] == 'sda' }.first[1]
+        reservable_disks = sd.select{ |v| v['reservation'] == true }.count > 0
+        maindisk = sd.select { |v| v['device'] == 'sda' }[0]
         maindisk_t = maindisk['storage'] + ' ' + G5K.get_size(maindisk['size'],'metric')
-        other = sd.to_a.select { |d| d[0] != 'sda' }
-        hdds = other.select { |d| d[1]['storage'] == 'HDD' }
+        other = sd.select { |d| d['device'] != 'sda' }
+        hdds = other.select { |d| d['storage'] == 'HDD' }
         if hdds.count == 0
           hdd_t = "0"
         else
-          hdd_t = hdds.count.to_s + " (" + hdds.map { |d| G5K.get_size(d[1]['size'],'metric') }.join(', ') + ")"
+          hdd_t = hdds.count.to_s + " (" + hdds.map { |d| G5K.get_size(d['size'],'metric') }.join(', ') + ")"
        end
-        ssds = other.select { |d| d[1]['storage'] == 'SSD' }
+        ssds = other.select { |d| d['storage'] == 'SSD' }
        if ssds.count == 0
          ssd_t = "0"
        else
-          ssd_t = ssds.count.to_s + " (" + ssds.map { |d| G5K.get_size(d[1]['size'],'metric') }.join(', ') + ")"
+          ssd_t = ssds.count.to_s + " (" + ssds.map { |d| G5K.get_size(d['size'],'metric') }.join(', ') + ")"
        end
        queues = cluster_hash['queues'] - ['admin', 'default']
        queue_t = (queues.nil? || (queues.empty? ? '' : "_.28" + queues[0].gsub(' ', '_') + ' queue.29'))
@@ -373,13 +373,18 @@ class G5KHardwareGenerator < WikiGenerator
       cluster_hash.fetch('nodes').sort.to_h.each { |node_uid, node_hash|
         next if node_hash['status'] == 'retired'
         if node_hash['network_adapters']
-          node_interfaces = node_hash['network_adapters'].select{ |k, v| v['interface'] == 'Ethernet' and v['enabled'] == true and (v['mounted'] == true or v['mountable'] == true) and v['management'] == false }
+          node_interfaces = node_hash['network_adapters'].select{ |v|
+            v['interface'] == 'Ethernet' and
+            v['enabled'] == true and
+            (v['mounted'] == true or v['mountable'] == true) and
+            v['management'] == false
+          }
           interfaces = {}
-          interfaces['25g_count'] = node_interfaces.select { |k, v| v['rate'] == 25_000_000_000 }.count
-          interfaces['10g_count'] = node_interfaces.select { |k, v| v['rate'] == 10_000_000_000 }.count
-          interfaces['1g_count'] = node_interfaces.select { |k, v| v['rate'] == 1_000_000_000 }.count
-          interfaces['details'] = node_interfaces.map{ |k, v| k + (v['name'].nil? ? '' : '/' + v['name']) + ' (' + G5K.get_rate(v['rate']) + ')' }.sort.join(', ')
+          interfaces['25g_count'] = node_interfaces.select { |v| v['rate'] == 25_000_000_000 }.count
+          interfaces['10g_count'] = node_interfaces.select { |v| v['rate'] == 10_000_000_000 }.count
+          interfaces['1g_count'] = node_interfaces.select { |v| v['rate'] == 1_000_000_000 }.count
+          interfaces['details'] = node_interfaces.map{ |v| v['device'] + (v['name'].nil? ? '' : '/' + v['name']) + ' (' + G5K.get_rate(v['rate']) + ')' }.sort.join(', ')
           queues = cluster_hash['queues'] - ['admin', 'default', 'testing']
           interfaces['queues'] = (queues.nil? || (queues.empty? ? '' : queues[0] + G5K.pluralize(queues.count, ' queue')))
           interface_add(network_interfaces, node_uid, interfaces) if node_interfaces.count > 1
diff --git a/lib/refrepo/gen/wiki/generators/site_hardware.rb b/lib/refrepo/gen/wiki/generators/site_hardware.rb
index 6e5cd3d193e70deacc359fe37f5a447f5c31c199..aea21ae09cd2179033412414b10046019de2c801 100644
--- a/lib/refrepo/gen/wiki/generators/site_hardware.rb
+++ b/lib/refrepo/gen/wiki/generators/site_hardware.rb
@@ -184,7 +184,6 @@ def get_hardware(sites)
     cluster_hash.fetch('nodes').sort.each { |node_uid, node_hash|
       next if node_hash['status'] == 'retired'
       # map model to vendor (eg: {'SAS5484654' => 'Seagate', 'PX458' => 'Toshiba' ...}
-      disk_model_vendor_mapping = global_hash['disk_vendor_model_mapping'].map{ |vdr, mdls| mdls.map{ |mdl| [mdl, vdr] } }.flatten(1).to_h
       hard = {}
       queue = cluster_hash['queues'] - ['admin', 'default']
       hard['queue'] = (queue.nil? || queue.empty?) ? '' : queue[0]
@@ -204,17 +203,51 @@ def get_hardware(sites)
       hard['num_processor_model'] = (hard['cpus_per_node'] == 1 ? '' : "#{hard['cpus_per_node']} x ") + hard['processor_model'].gsub(' ', ' ')
       hard['processor_description'] = "#{hard['processor_model']} (#{hard['microarchitecture']}#{hard['processor_freq'] ? ', ' + hard['processor_freq'] : ''}, #{hard['cpus_per_node_str']}, #{hard['cores_per_cpu_str']})"
       hard['ram_size'] = G5K.get_size(node_hash['main_memory']['ram_size'])
-      storage = node_hash['storage_devices'].map{ |k, v| {'size' => v['size'], 'tech' => v['storage']} }
-      hard['storage'] = storage.each_with_object(Hash.new(0)) { |data, counts| counts[data] += 1 }.to_a.sort_by { |e| e[0]['size'].to_f }.map{ |e| (e[1] == 1 ? '' : e[1].to_s + ' x ') + G5K.get_size(e[0]['size'],'metric') + ' ' + e[0]['tech'] }.join(' + ')
-      hard['storage_size'] = storage.inject(0){|sum, v| sum + (v['size'].to_f / 2**30).floor }.to_s # round to GB to avoid small differences within a cluster
-      storage_description = node_hash['storage_devices'].map { |k, v| { 'device' => v['device'], 'size' => v['size'], 'tech' => v['storage'], 'interface' => v['interface'], 'vendor' => disk_model_vendor_mapping[v['model']],'model' => v['model'], 'driver' => v['driver'], 'path' => v['by_path'] || v['by_id'], 'count' => node_hash['storage_devices'].count } }
+      storage = node_hash['storage_devices'].map { |i| { 'size' => i['size'], 'tech' => i['storage'] } }
+      hard['storage'] = storage.each_with_object(Hash.new(0)) { |data, counts|
+        counts[data] += 1
+      }.to_a.sort_by { |e|
+        e[0]['size'].to_f
+      }.map { |e|
+        (e[1] == 1 ? '' : e[1].to_s + ' x ') +
+        G5K.get_size(e[0]['size'], 'metric') +
+        ' ' +
+        e[0]['tech']
+      }.join(' + ')
-      hard['storage_description'] = storage_description.map { |e| [ e['count'] > 1 ? "\n*" : '', G5K.get_size(e['size'],'metric'), e['tech'], e['interface'], e['vendor'], e['model'], ' (driver: ' + (e['driver'] || 'MISSING') + ', path: ' + (e['path'] || 'MISSING') + ')'].join(' ') }.join('<br />')
+      hard['storage_size'] = storage.inject(0){|sum, v| sum + (v['size'].to_f / 2**30).floor }.to_s # round to GB to avoid small differences within a cluster
+      storage_description = node_hash['storage_devices'].sort { |a,b|
+        a['device'] <=> b['device']
+      }.map do |v|
+        {
+          'device' => v['device'],
+          'size' => v['size'],
+          'tech' => v['storage'],
+          'interface' => v['interface'],
+          'vendor' => v['vendor'],
+          'model' => v['model'],
+          'driver' => v['driver'],
+          'path' => v['by_path'] || v['by_id'],
+          'count' => node_hash['storage_devices'].count
+        }
+      end
-      network = node_hash['network_adapters'].select { |k, v|
+      hard['storage_description'] = storage_description.map { |e|
+        [
+          e['count'] > 1 ? "\n*" : '',
+          G5K.get_size(e['size'],'metric'),
+          e['tech'],
+          e['interface'],
+          e['vendor'],
+          e['model'],
+          ' (driver: ' + (e['driver'] || 'MISSING') + ', path: ' + (e['path'] || 'MISSING') + ')'
+        ].join(' ')
+      }.join('<br />')
+
+      network = node_hash['network_adapters'].select { |v|
         v['management'] == false &&
-        (k =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
-      }.map{|k, v| {
+        (v['device'] =~ /\./).nil? # exclude PKEY / VLAN interfaces see #9417
+      }.map{|v| {
         'rate' => v['rate'],
         'interface' => v['interface'],
         'used' => (v['enabled'] and (v['mounted'] or v['mountable']))
@@ -236,12 +269,12 @@ def get_hardware(sites)
         sum + (v['rate'].to_f / 10**6).floor
       }.to_s # round to Mbps
 
-      network_description = node_hash['network_adapters'].select { |k, v|
+      network_description = node_hash['network_adapters'].select { |v|
         v['management'] == false &&
-        (k =~ /\./).nil? # exclude PKEY / VLAN interface see #9417
-      }.map{ |k, v|
+        (v['device'] =~ /\./).nil? # exclude PKEY / VLAN interface see #9417
+      }.map{ |v|
         {
-          'device' => k,
+          'device' => v['device'],
           'name' => v['name'],
           'rate' => v['rate'],
           'interface' => v['interface'],
diff --git a/lib/refrepo/gen/wiki/generators/site_network.rb b/lib/refrepo/gen/wiki/generators/site_network.rb
index c6ff45a39a610718b52340a82afadb3966e2beb4..56f373a6d01ed2d420863918b805392a75e34951 100644
--- a/lib/refrepo/gen/wiki/generators/site_network.rb
+++ b/lib/refrepo/gen/wiki/generators/site_network.rb
@@ -23,7 +23,7 @@ class SiteNetworkGenerator < WikiGenerator
 
   def generate_equipments
     h = G5K::get_global_hash['sites'][@site]
-    return h['networks'].to_a.map { |e| "* #{e[0]}: #{e[1]['model']}" }.sort.join("\n")
+    return h['network_equipments'].to_a.map { |e| "* #{e[0]}: #{e[1]['model']}" }.sort.join("\n")
   end
 
   def generate_content
diff --git a/lib/refrepo/gen/wiki/mw_utils.rb b/lib/refrepo/gen/wiki/mw_utils.rb
index 64011552c35df19d82a348efde394b6e8b2ae9dd..8124d729c05633889ec335ce97ccd8601e7c8699 100644
--- a/lib/refrepo/gen/wiki/mw_utils.rb
+++ b/lib/refrepo/gen/wiki/mw_utils.rb
@@ -21,7 +21,7 @@ module MediawikiApi
     res = get_conn.send(:get, '', params)
     res.body
   end
-  
+
   def get_file_content(file_name)
     get_conn = Faraday.new(url: MW::BASE_URL + "images/#{file_name}") do |faraday|
       faraday.request :multipart
@@ -95,7 +95,7 @@ module G5K
       end
     end
   end
-  
+
   def self.get_rate(x)
     return '' if (x == 0 || x.nil?)
     mbps = (x.to_f / 10**6).floor
@@ -109,11 +109,11 @@ module G5K
   def self.pluralize(count, word)
     return (count == 1 || word[-1] == 's') ? word : word + 's'
   end
-  
+
   @@global_hash = nil
   def self.get_global_hash
     if @@global_hash.nil?
-      @@global_hash = load_yaml_file_hierarchy
+      @@global_hash = load_data_hierarchy
     end
     # return a deep copy of global_hash
     return Marshal.load(Marshal.dump(@@global_hash))
@@ -144,7 +144,7 @@ module MW
 
   UNSORTED_INLINE_CELL = "!!"
   UNSORTED_TABLE_CELL = "!"
-  
+
   LINE_FEED = "\n"
 
   LIST_ITEM = "*"
@@ -212,7 +212,7 @@ module MW
   def self.italic(text)
     "''" + text + "''"
   end
-  
+
   def self.bold(text)
     "'''" + text + "'''"
   end
@@ -223,6 +223,6 @@ module MW
 
   def self.code(text)
     "<code>" + text + "</code>"
-  end 
+  end
 end