diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1a403f8c019e40abf62962adb3947ee39b62a9e9..b07e215238bfeecc45bd62ec605ce7095d7f7881 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,6 @@
 ---
 stages:
+  - lint
   - validate
   - generate
   - deploy
@@ -9,14 +10,8 @@ include:
     ref: master
     file: '/sonarqube.yml'
+  - project: 'grid5000/grid5000-gitlab-templates'
+    ref: master
+    file: '/rubocop.yml'
 
-rubocop:
-  stage: validate
-  tags:
-    - grid5000-docker
-  image: debian:buster
-  script:
-    - apt-get update && apt-get -y install rubocop
-    - rubocop -l --fail-level W
-
 validate-data:
   stage: validate
diff --git a/lib/refrepo/gen/oar-properties.rb b/lib/refrepo/gen/oar-properties.rb
index 22bd4eb634bd6f0b7916fb47d477d8971d8af7e3..997430e998095d05d26cba768e77038f517ff100 100644
--- a/lib/refrepo/gen/oar-properties.rb
+++ b/lib/refrepo/gen/oar-properties.rb
@@ -504,7 +504,7 @@ end
 def properties_internal(properties)
   str = properties
             .to_a
-            .select{|k, v| not ignore_default_keys.include? k}
+            .select{|k, _v| not ignore_default_keys.include? k}
             .map do |(k, v)|
                 v = "YES" if v == true
                 v = "NO"  if v == false
@@ -516,7 +516,7 @@ end
 def disk_properties_internal(properties)
   str = properties
             .to_a
-            .select{|k, v| not v.nil? and not v==""}
+            .select{|_k, v| not v.nil? and not v==""}
             .map do |(k, v)|
     v = "YES" if v == true
     v = "NO"  if v == false
@@ -774,16 +774,16 @@ def run_commands_via_ssh(cmds, options, verbose=true)
   res = ""
   c = Net::SSH.start(options[:ssh][:host], options[:ssh][:user])
   c.open_channel { |channel|
-    channel.exec('sudo bash') { |ch, success|
+    channel.exec('sudo bash') { |_ch, _success|
       # stdout
-      channel.on_data { |ch2, data|
+      channel.on_data { |_ch2, data|
         if verbose
           puts data
         end
         res += data
       }
       # stderr
-      channel.on_extended_data do |ch2, type, data|
+      channel.on_extended_data do |_ch2, _type, data|
         if verbose
           puts data
         end
@@ -1212,7 +1212,7 @@ def extract_clusters_description(clusters, site_name, options, data_hierarchy, s
     end
 
     if is_a_new_cluster
-      oar_resource_ids = phys_rsc_map["core"][:current_ids].map{|r| -1}
+      oar_resource_ids = phys_rsc_map["core"][:current_ids].map{|_r| -1}
     else
       oar_resource_ids = cluster_resources.map{|r| r["id"]}.uniq
     end
@@ -1270,7 +1270,7 @@ def extract_clusters_description(clusters, site_name, options, data_hierarchy, s
         :description => node_description,
         :oar_rows => [],
         :disks => [],
-        :gpus => (node_description["gpu_devices"] != nil ? (node_description["gpu_devices"].select{|k ,v| v.fetch("reservation", true)}.length) : 0),
+        :gpus => (node_description["gpu_devices"] != nil ? (node_description["gpu_devices"].select{|_k, v| v.fetch("reservation", true)}.length) : 0),
         :default_description => node_description_default_properties
       }
 
diff --git a/lib/refrepo/gen/puppet/bindg5k.rb b/lib/refrepo/gen/puppet/bindg5k.rb
index ee86aded4558a77132522d3b50541e603e935d54..ac4e7f0d980e23de942cc9d433357504d2635500 100644
--- a/lib/refrepo/gen/puppet/bindg5k.rb
+++ b/lib/refrepo/gen/puppet/bindg5k.rb
@@ -610,7 +610,7 @@ def fetch_site_records(site, type)
 
   site.fetch("clusters", []).sort.each { |cluster_uid, cluster|
 
-    cluster.fetch('nodes').select { |node_uid, node|
+    cluster.fetch('nodes').select { |_node_uid, node|
       node != nil && node["status"] != "retired" && node.has_key?('network_adapters')
     }.each_sort_by_node_uid { |node_uid, node|
 
diff --git a/lib/refrepo/gen/puppet/dhcpg5k.rb b/lib/refrepo/gen/puppet/dhcpg5k.rb
index 6d050371cc9e18b450950247e7b317795c9f8e7f..97685679b9b3d4e65bbe2ed99b48a31ef033a67b 100644
--- a/lib/refrepo/gen/puppet/dhcpg5k.rb
+++ b/lib/refrepo/gen/puppet/dhcpg5k.rb
@@ -67,8 +67,8 @@ def generate_puppet_dhcpg5k(options)
     #
 
     # Relocate ip/mac info of MIC
-    site_hash.fetch("clusters").each { |cluster_uid, cluster_hash|
-      cluster_hash.fetch('nodes').each { |node_uid, node_hash|
+    site_hash.fetch("clusters").each { |_cluster_uid, cluster_hash|
+      cluster_hash.fetch('nodes').each { |_node_uid, node_hash|
         next if node_hash == nil || node_hash['status'] == 'retired'
 
         if node_hash['mic'] && node_hash['mic']['ip'] && node_hash['mic']['mac']
@@ -113,7 +113,7 @@ def generate_puppet_dhcpg5k(options)
 
     if ! site_hash['pdus'].nil?
       # Relocate ip/mac info of PDUS
-      site_hash['pdus'].each { |pdu_uid, pdu_hash|
+      site_hash['pdus'].each { |_pdu_uid, pdu_hash|
         if pdu_hash['ip'] && pdu_hash['mac']
           pdu_hash['network_adapters'] ||= {}
           pdu_hash['network_adapters']['pdu'] ||= {}
diff --git a/lib/refrepo/gen/puppet/kavlanngg5k.rb b/lib/refrepo/gen/puppet/kavlanngg5k.rb
index 015ac092c96fe37d7a8574187d1eeef1c7cbe910..1ba9cb75e0c7b708aa2077b7a9f5362f0950082a 100644
--- a/lib/refrepo/gen/puppet/kavlanngg5k.rb
+++ b/lib/refrepo/gen/puppet/kavlanngg5k.rb
@@ -12,7 +12,7 @@ def gen_json(output_path)
   site_data_hierarchy.delete_if { |k| k != 'sites' }
   site_data_hierarchy['sites'].each do |site_id, site_h|
     site_h.delete_if { |k| !['clusters', 'network_equipments', 'servers'].include? k }
-    site_h['clusters'].each do |cluster_id, cluster_h|
+    site_h['clusters'].each do |_cluster_id, cluster_h|
       cluster_h.delete_if { |k| k != 'nodes' }
       cluster_h['nodes'].each do |_node_id, node_h|
         node_h.delete_if { |k| k != 'network_adapters' }
@@ -46,11 +46,11 @@ def gen_json(output_path)
     end
   end
   # consistent order
-  site_data_hierarchy['sites'] = site_data_hierarchy['sites'].sort_by { |site_id, site_h| site_id }.to_h
-  site_data_hierarchy['sites'].each { |site_id, site_h|
-    site_h['clusters'] = site_h['clusters'].sort_by { |cluster_id, cluster_h| cluster_id }.to_h
-    site_h['clusters'].each { |cluster_id, cluster_h|
-      cluster_h['nodes'] = cluster_h['nodes'].sort_by { |node_id, node_h| node_id[/(\d+)/].to_i }.to_h
+  site_data_hierarchy['sites'] = site_data_hierarchy['sites'].sort_by { |site_id, _site_h| site_id }.to_h
+  site_data_hierarchy['sites'].each { |_site_id, site_h|
+    site_h['clusters'] = site_h['clusters'].sort_by { |cluster_id, _cluster_h| cluster_id }.to_h
+    site_h['clusters'].each { |_cluster_id, cluster_h|
+      cluster_h['nodes'] = cluster_h['nodes'].sort_by { |node_id, _node_h| node_id[/(\d+)/].to_i }.to_h
     }
   }
 
diff --git a/lib/refrepo/gen/puppet/lanpowerg5k.rb b/lib/refrepo/gen/puppet/lanpowerg5k.rb
index 7d8132b379625e0d29a2746dcd19e1487f210a03..ad93d56a7a206eb4dbaa51ba232c779a6984ef33 100644
--- a/lib/refrepo/gen/puppet/lanpowerg5k.rb
+++ b/lib/refrepo/gen/puppet/lanpowerg5k.rb
@@ -58,7 +58,7 @@ def generate_puppet_lanpowerg5k(options)
       cluster_hash['user'] ||= cluster_credentials.split(' ')[0]
       cluster_hash['password'] ||= cluster_credentials.split(' ')[1]
 
-      cluster_hash.reject!{ |k,v| v == nil }
+      cluster_hash.reject!{ |_k,v| v == nil }
 
       h['clusters'][cluster_uid] = cluster_hash
 
diff --git a/lib/refrepo/gen/puppet/refapi-subset.rb b/lib/refrepo/gen/puppet/refapi-subset.rb
index e6f5ae9e639a20bc051c5659df5d308a3ea10da9..a6f53e06e9d05f5eba86b00471dadf7c33af3f5d 100644
--- a/lib/refrepo/gen/puppet/refapi-subset.rb
+++ b/lib/refrepo/gen/puppet/refapi-subset.rb
@@ -19,16 +19,16 @@ def gen_json(site, output_path)
   site_data_hierarchy.delete_if { |key| key != 'sites' }
   site_data_hierarchy['sites'].delete_if { |key| key != site }
   site_data_hierarchy['sites'][site].delete_if { |key| key != 'clusters' }
-  site_data_hierarchy['sites'][site]['clusters'].to_h.each do |cluster_uid, cluster_hash|
+  site_data_hierarchy['sites'][site]['clusters'].to_h.each do |_cluster_uid, cluster_hash|
     cluster_hash.delete_if { |key| key != 'nodes' }
-    cluster_hash['nodes'].to_h.each do |node_uid, node_hash|
+    cluster_hash['nodes'].to_h.each do |_node_uid, node_hash|
       node_hash.delete_if { |key| key != 'software' }
       node_hash['software'].delete_if { |key| key != 'standard-environment' }
     end
 
     cluster_hash['nodes'] = cluster_hash['nodes'].sort_by{|node_uid, _node_hash| node_uid[/(\d+)/].to_i }.to_h
   end
-  site_data_hierarchy['sites'][site]['clusters'] = site_data_hierarchy['sites'][site]['clusters'].sort_by{ |cluster_uid, cluster_hash| cluster_uid }.to_h
+  site_data_hierarchy['sites'][site]['clusters'] = site_data_hierarchy['sites'][site]['clusters'].sort_by{ |cluster_uid, _cluster_hash| cluster_uid }.to_h
 
   output_file = File.new(output_path, 'w')
   output_file.write(JSON.pretty_generate(site_data_hierarchy))
diff --git a/lib/refrepo/gen/reference-api.rb b/lib/refrepo/gen/reference-api.rb
index aa856e0767c17276b1a52264f609a1349621cebf..f0f81f1c320ad098735c9168551be04468114f61 100644
--- a/lib/refrepo/gen/reference-api.rb
+++ b/lib/refrepo/gen/reference-api.rb
@@ -43,7 +43,7 @@ def generate_reference_api
     grid_path.mkpath()
 
     write_json(grid_path.join("#{global_hash['uid']}.json"),
-               global_hash.reject {|k, v| k == "sites" || k == "network_equipments" || k == "disk_vendor_model_mapping"})
+               global_hash.reject {|k, _v| k == "sites" || k == "network_equipments" || k == "disk_vendor_model_mapping"})
   end
 
   puts "Generating the reference api:\n\n"
@@ -76,7 +76,7 @@ def generate_reference_api
     site_path.mkpath()
 
     write_json(site_path.join("#{site_uid}.json"),
-               site.reject {|k, v| k == "clusters" || k == "networks" || k == "pdus" || k == "dom0" || k == "laptops" || k == "servers" })
+               site.reject {|k, _v| k == "clusters" || k == "networks" || k == "pdus" || k == "dom0" || k == "laptops" || k == "servers" })
 
     #
     # Write pdu info
@@ -118,7 +118,7 @@ def generate_reference_api
 
       # Write cluster info w/o nodes entries
       write_json(cluster_path.join("#{cluster_uid}.json"),
-                 cluster.reject {|k, v| k == "nodes"})
+                 cluster.reject {|k, _v| k == "nodes"})
 
       #
       # Write node info
@@ -133,12 +133,12 @@ def generate_reference_api
         node.delete("status")
 
         # Convert hashes to arrays
-        node["storage_devices"] = node["storage_devices"].sort_by{ |sd, v| v['id'] }.map { |a| a[1] }
+        node["storage_devices"] = node["storage_devices"].sort_by{ |_sd, v| v['id'] }.map { |a| a[1] }
 
-        node["network_adapters"].each { |key, hash| node["network_adapters"][key]["device"] = key; } # Add "device: ethX" within the hash
+        node["network_adapters"].each { |key, _hash| node["network_adapters"][key]["device"] = key; } # Add "device: ethX" within the hash
         node["network_adapters"] = node["network_adapters"].sort_by_array(["eth0", "eth1", "eth2", "eth3", "eth4", "eth5", "eth6", "ib0.8100", "ib0", "ib1", "ib2", "ib3", "ib4", "ib5", "ib6", "ib7", "ibs1","bmc", "eno1", "eno2", "eno1np0", "eno2np1", "ens4f0", "ens4f1", "ens5f0", "ens5f1"]).values
 
-        node["memory_devices"].each { |key, hash| node["memory_devices"][key]["device"] = key; } # Add "device: dimm_a1" within the hash
+        node["memory_devices"].each { |key, _hash| node["memory_devices"][key]["device"] = key; } # Add "device: dimm_a1" within the hash
         node["memory_devices"] = node["memory_devices"].sort_by { |d, _|
           [d.gsub(/dimm_(\d+)/, '\1').to_i,
            d.gsub(/^dimm_([A-z]+)(\d+)$/, '\1'),
@@ -157,7 +157,7 @@ def generate_reference_api
   #
 
   # rename entry for the all-in-on json file
-  global_hash["sites"].each do |site_uid, site|
+  global_hash["sites"].each do |_site_uid, site|
     site["network_equipments"] = site.delete("networks")
   end
 
diff --git a/lib/refrepo/gen/wiki/generators/cpu_parameters.rb b/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
index ea365a7c4f252c56fe00d1d2fd312baa4299df40..1d21f8dc179438c85ad42e5e1f78ed17f5b32736 100644
--- a/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
+++ b/lib/refrepo/gen/wiki/generators/cpu_parameters.rb
@@ -1,9 +1,5 @@
 
 class CPUParametersGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
 
diff --git a/lib/refrepo/gen/wiki/generators/disk_reservation.rb b/lib/refrepo/gen/wiki/generators/disk_reservation.rb
index a3bc7a918a3fb2e5510b1f482fe780c032ec5953..a525961775641b81b59e046751dd2a8f2c0fa905 100644
--- a/lib/refrepo/gen/wiki/generators/disk_reservation.rb
+++ b/lib/refrepo/gen/wiki/generators/disk_reservation.rb
@@ -2,9 +2,5 @@
 
 class DiskReservationGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
     table_columns = ["Site", "Cluster", "Number of nodes", "Number of reservable disks per node"]
diff --git a/lib/refrepo/gen/wiki/generators/group_storage.rb b/lib/refrepo/gen/wiki/generators/group_storage.rb
index 5350889e8132d2547f3e7eadb458997e105450f3..722d79af9f1a5814105f5e5ba695189a74bab6d2 100644
--- a/lib/refrepo/gen/wiki/generators/group_storage.rb
+++ b/lib/refrepo/gen/wiki/generators/group_storage.rb
@@ -2,9 +2,5 @@
 
 class GroupStorageGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
     table_columns = ["Site", "Server Name", "Size", "Link Speed", "Notes"]
diff --git a/lib/refrepo/gen/wiki/generators/hardware.rb b/lib/refrepo/gen/wiki/generators/hardware.rb
index 2a6989d844e1c3ed8f0db97ada9aef31f19e1178..71f340822ce9f1a888540497d123a51aae36ef0c 100644
--- a/lib/refrepo/gen/wiki/generators/hardware.rb
+++ b/lib/refrepo/gen/wiki/generators/hardware.rb
@@ -3,9 +3,5 @@ require 'refrepo/gen/wiki/generators/site_hardware'
 
 class G5KHardwareGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
     @global_hash = get_global_hash
@@ -39,7 +35,7 @@ class G5KHardwareGenerator < WikiGenerator
     }
 
     @global_hash['sites'].sort.to_h.each { |site_uid, site_hash|
-      site_hash['clusters'].sort.to_h.each { |cluster_uid, cluster_hash|
+      site_hash['clusters'].sort.to_h.each { |_cluster_uid, cluster_hash|
         cluster_hash['nodes'].sort.to_h.each { |node_uid, node_hash|
           begin
             next if node_hash['status'] == 'retired'
@@ -94,7 +90,7 @@ class G5KHardwareGenerator < WikiGenerator
             }
 
             net_interconnects = interfaces.inject(Hash.new(0)){ |h, v| h[v] += 1; h }
-            net_interconnects.sort_by { |k, v|  k.first[:sort] }.each { |k, v|
+            net_interconnects.sort_by { |k, _v|  k.first[:sort] }.each { |k, v|
               init(data, 'net_interconnects', k)
               data['net_interconnects'][k][site_uid] += v
             }
@@ -123,7 +119,7 @@ class G5KHardwareGenerator < WikiGenerator
 
             net_models = interfaces.inject(Hash.new(0)){ |h, v| h[v] += 1; h }
             # Sort by interface type (eth or IB) and then by driver
-            net_models.sort_by { |k, v|  [k.first[:sort], k[1][:sort]] }.each { |k, v|
+            net_models.sort_by { |k, _v|  [k.first[:sort], k[1][:sort]] }.each { |k, v|
               init(data, 'net_models', k)
               data['net_models'][k][site_uid] += v
             }
@@ -150,7 +146,7 @@ class G5KHardwareGenerator < WikiGenerator
             }
 
             ssd_models = ssd.inject(Hash.new(0)){ |h, v| h[v] += 1; h }
-            ssd_models.sort_by { |k, v|  k.first[:sort] }.each { |k, v|
+            ssd_models.sort_by { |k, _v|  k.first[:sort] }.each { |k, v|
               init(data, 'ssd_models', k)
               data['ssd_models'][k][site_uid] += v
             }
@@ -296,7 +292,7 @@ class G5KHardwareGenerator < WikiGenerator
       # Sort the table by the identifiers (e.g. Microarchitecture, or Microarchitecture + CPU name).
       # This colum is either just a text field, or a more complex hash with a :sort key that should be
       # used for sorting.
-      |k, v| k.map { |c| c.kind_of?(Hash) ? c[:sort] : c }
+      |k, _v| k.map { |c| c.kind_of?(Hash) ? c[:sort] : c }
     }.to_h.each { |k, v|
       k0 = k if index == 0
       index += 1
diff --git a/lib/refrepo/gen/wiki/generators/kwollect_metrics.rb b/lib/refrepo/gen/wiki/generators/kwollect_metrics.rb
index 3ba3532cd8cc0f844380544d68f785d9ea4afe8f..8423396106adde01a6eec09e6016a2b1617ef1a6 100644
--- a/lib/refrepo/gen/wiki/generators/kwollect_metrics.rb
+++ b/lib/refrepo/gen/wiki/generators/kwollect_metrics.rb
@@ -2,9 +2,5 @@
 
 class KwollectMetricsGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
     @generated_content = "__NOEDITSECTION__\n"
diff --git a/lib/refrepo/gen/wiki/generators/oar_properties.rb b/lib/refrepo/gen/wiki/generators/oar_properties.rb
index 3f77b51d35aff8ca912bbe08d8e58844045703ae..a0f4956faa7133192632e16aea80886c8e63b7bf 100644
--- a/lib/refrepo/gen/wiki/generators/oar_properties.rb
+++ b/lib/refrepo/gen/wiki/generators/oar_properties.rb
@@ -5,9 +5,5 @@ require 'refrepo/data_loader'
 
 class OarPropertiesGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   #Static information about properties that cannot be infered from ref-api data
   @@properties = {
@@ -270,7 +266,7 @@ class OarPropertiesGenerator < WikiGenerator
     props = {}
     oarapi_properties = []
 
-    G5K::SITES.each_with_index{ |site_uid, index|
+    G5K::SITES.each_with_index{ |site_uid, _index|
       props[site_uid] = {}
       props[site_uid]["default"] = get_ref_default_properties(site_uid, refapi["sites"][site_uid])
       props[site_uid]["disk"] = get_ref_disk_properties(site_uid, refapi["sites"][site_uid])
@@ -284,9 +280,9 @@ class OarPropertiesGenerator < WikiGenerator
 
     #Compiled properties used to generate page
     oar_properties = {}
-    props.sort.to_h.each { |site, site_props|
-      site_props.sort.to_h.each { |type, type_props|
-        type_props.sort.to_h.each { |node_uid, node_props|
+    props.sort.to_h.each { |_site, site_props|
+      site_props.sort.to_h.each { |_type, type_props|
+        type_props.sort.to_h.each { |_node_uid, node_props|
           node_props.sort.to_h.each { |property, value|
             next if @@ignored_properties.include?(property)
 
diff --git a/lib/refrepo/gen/wiki/generators/oarsub_simplifier_aliases.rb b/lib/refrepo/gen/wiki/generators/oarsub_simplifier_aliases.rb
index a37eadb2c71b661940b315f5771ee03cb780e6e4..cbc67b37804907d0bf18cd67f5faa419395af258 100644
--- a/lib/refrepo/gen/wiki/generators/oarsub_simplifier_aliases.rb
+++ b/lib/refrepo/gen/wiki/generators/oarsub_simplifier_aliases.rb
@@ -3,9 +3,6 @@
 require 'refrepo/gen/puppet/oarsub-simplifier-aliases'
 
 class OarsubSimplifierAliasesGenerator < WikiGenerator
-  def initialize(page_name)
-    super(page_name)
-  end
 
   def generate_content(options)
     default_aliases = get_sub_simplifier_default_aliases(options)
diff --git a/lib/refrepo/gen/wiki/generators/site_hardware.rb b/lib/refrepo/gen/wiki/generators/site_hardware.rb
index ff248ec7e7f4ace60109833bcefe4186f231711e..6cf5d67d5a5af046e4b36c03d95f725cc2e24ca8 100644
--- a/lib/refrepo/gen/wiki/generators/site_hardware.rb
+++ b/lib/refrepo/gen/wiki/generators/site_hardware.rb
@@ -55,10 +55,10 @@ class SiteHardwareGenerator < WikiGenerator
     pmem = 0
     flops = 0
 
-    sites_hash.sort.to_h.each do |site_uid, site_hash|
+    sites_hash.sort.to_h.each do |_site_uid, site_hash|
       clusters += site_hash['clusters'].length
-      site_hash['clusters'].sort.to_h.each do |cluster_uid, cluster_hash|
-        cluster_hash['nodes'].sort.to_h.each do |node_uid, node_hash|
+      site_hash['clusters'].sort.to_h.each do |_cluster_uid, cluster_hash|
+        cluster_hash['nodes'].sort.to_h.each do |_node_uid, node_hash|
           next if node_hash['status'] == 'retired'
           nodes += 1
           cores += node_hash['architecture']['nb_cores']
@@ -103,20 +103,20 @@ class SiteHardwareGenerator < WikiGenerator
     hardware = get_hardware([site])
 
     site_accelerators = 0
-    hardware[site].sort.to_h.each { |cluster_uid, cluster_hash|
-      site_accelerators += cluster_hash.select { |k, v| v['accelerators'] != '' }.count
+    hardware[site].sort.to_h.each { |_cluster_uid, cluster_hash|
+      site_accelerators += cluster_hash.select { |_k, v| v['accelerators'] != '' }.count
     }
 
     hardware[site].sort.to_h.each { |cluster_uid, cluster_hash|
       cluster_nodes = cluster_hash.keys.flatten.count
-      queue = cluster_hash.map { |k, v| v['queue']}.first
+      queue = cluster_hash.map { |_k, v| v['queue']}.first
       access_conditions = []
       if queue == 'production'
         access_conditions << "<b>[[Grid5000:UsagePolicy#Rules_for_the_production_queue|#{queue}]]</b>&nbsp;queue"
       elsif queue != ''
         access_conditions << "<b>#{queue}</b>&nbsp;queue"
       end
-      access_conditions << '<b>[[Getting_Started#Selecting_specific_resources|exotic]]</b>&nbsp;job&nbsp;type' if cluster_hash.map { |k, v| v['exotic']}.first
+      access_conditions << '<b>[[Getting_Started#Selecting_specific_resources|exotic]]</b>&nbsp;job&nbsp;type' if cluster_hash.map { |_k, v| v['exotic']}.first
       table_columns = (with_sites == true ? ['Site'] : []) + ['Cluster',  'Access Condition', 'Date of arrival', { attributes: 'data-sort-type="number"', text: 'Nodes' }, 'CPU', { attributes: 'data-sort-type="number"', text: 'Cores' }, { attributes: 'data-sort-type="number"', text: 'Memory' }, { attributes: 'data-sort-type="number"', text: 'Storage' }, { attributes: 'data-sort-type="number"', text: 'Network' }] + ((site_accelerators.zero? && with_sites == false) ? [] : ['Accelerators'])
       data = partition(cluster_hash)
       table_data <<  (with_sites == true ? ["[[#{site.capitalize}:Hardware|#{site.capitalize}]]"] : []) + [
@@ -153,13 +153,13 @@ class SiteHardwareGenerator < WikiGenerator
     hardware = get_hardware([site])
 
     site_accelerators = 0
-    hardware[site].sort.to_h.each { |cluster_uid, cluster_hash|
-      site_accelerators += cluster_hash.select { |k, v| v['accelerators'] != '' }.count
+    hardware[site].sort.to_h.each { |_cluster_uid, cluster_hash|
+      site_accelerators += cluster_hash.select { |_k, v| v['accelerators'] != '' }.count
     }
 
     # Group by queue
     # Alphabetic ordering of queue names matches what we want: "default" < "production" < "testing"
-    hardware[site].group_by { |cluster_uid, cluster_hash| cluster_hash.map { |k, v| v['queue']}.first }.sort.each { |queue, clusters|
+    hardware[site].group_by { |_cluster_uid, cluster_hash| cluster_hash.map { |_k, v| v['queue']}.first }.sort.each { |queue, clusters|
       queue = (queue.nil? || queue.empty?) ? 'default' : queue
       queue_drawgantt_url = get_queue_drawgantt_url(site, queue)
       if (queue != 'testing')
@@ -172,10 +172,10 @@ class SiteHardwareGenerator < WikiGenerator
         cluster_nodes = cluster_hash.keys.flatten.count
         cluster_cpus = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] }.reduce(:+)
         cluster_cores = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] * v['cores_per_cpu'] }.reduce(:+)
-        queue_str = cluster_hash.map { |k, v| v['queue_str']}.first
+        queue_str = cluster_hash.map { |_k, v| v['queue_str']}.first
         access_conditions = []
         access_conditions << queue_str if queue_str != ''
-        access_conditions << "exotic job type" if cluster_hash.map { |k, v| v['exotic']}.first
+        access_conditions << "exotic job type" if cluster_hash.map { |_k, v| v['exotic']}.first
         table_columns = ['Cluster',  'Queue', 'Date of arrival', { attributes: 'data-sort-type="number"', text: 'Nodes' }, 'CPU', { attributes: 'data-sort-type="number"', text: 'Cores' }, { attributes: 'data-sort-type="number"', text: 'Memory' }, { attributes: 'data-sort-type="number"', text: 'Storage' }, { attributes: 'data-sort-type="number"', text: 'Network' }] + (site_accelerators.zero? ? [] : ['Accelerators'])
 
         cluster_drawgantt_url = get_queue_drawgantt_url(site, queue)+"?filter=#{cluster_uid}%20only"
@@ -185,7 +185,7 @@ class SiteHardwareGenerator < WikiGenerator
         reservation_cmd = "\n{{Term|location=f#{site}|cmd="
         reservation_cmd += "<code class=\"command\">oarsub</code> "
         reservation_cmd += "<code class=\"replace\">-q #{queue}</code> " if queue != 'default'
-        reservation_cmd += "<code class=\"replace\">-t exotic</code> " if cluster_hash.map { |k, v| v['exotic']}.first
+        reservation_cmd += "<code class=\"replace\">-t exotic</code> " if cluster_hash.map { |_k, v| v['exotic']}.first
         reservation_cmd += "<code class=\"env\">-p #{cluster_uid}</code> "
         reservation_cmd += "<code>-I</code>"
         reservation_cmd += "}}\n"
@@ -267,7 +267,7 @@ end
 def partition(cluster_hash)
   data = {}
   h1 = {}
-  cluster_hash.sort.to_h.each { |num2, h2|
+  cluster_hash.sort.to_h.each { |_num2, h2|
     h2.each_key{ |k|
       h1[k] = []
       cluster_hash.sort.to_h.each { |num3, h3|
@@ -335,7 +335,7 @@ def get_hardware(sites)
 
   # Loop over each cluster of the site
   hardware = {}
-  global_hash['sites'].sort.to_h.select{ |site_uid, site_hash| sites.include?(site_uid) }.each { |site_uid, site_hash|
+  global_hash['sites'].sort.to_h.select{ |site_uid, _site_hash| sites.include?(site_uid) }.each { |site_uid, site_hash|
     hardware[site_uid] = {}
     site_hash['clusters'].sort.to_h.each { |cluster_uid, cluster_hash|
       hardware[site_uid][cluster_uid] = {}
diff --git a/lib/refrepo/gen/wiki/generators/status.rb b/lib/refrepo/gen/wiki/generators/status.rb
index 5ae0d4c3c0252ebb77befa542a1cf56580c0aae5..ee321062e6ea0994f09a6731053bc14e8d3355ae 100644
--- a/lib/refrepo/gen/wiki/generators/status.rb
+++ b/lib/refrepo/gen/wiki/generators/status.rb
@@ -2,9 +2,5 @@
 
 class StatusGenerator < WikiGenerator
 
-  def initialize(page_name)
-    super(page_name)
-  end
-
   def generate_content(_options)
     @global_hash = get_global_hash
diff --git a/lib/refrepo/hash/hash.rb b/lib/refrepo/hash/hash.rb
index aa7f0e8ccf79e88f36ff16bd0fd64aabb3825bb6..9c6c71acffe37e572dc3e01316ef66abcd6226b8 100644
--- a/lib/refrepo/hash/hash.rb
+++ b/lib/refrepo/hash/hash.rb
@@ -61,7 +61,7 @@ class ::Hash
   # a.deep_merge(b) -> {:key=>"value_b"}
   # b.deep_merge(a) -> {:key=>"value_a"}
   def deep_merge(other_hash)
-    merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? v1.merge(v2, &merger) : v2 }
+    merger = proc { |_key, v1, v2| Hash === v1 && Hash === v2 ? v1.merge(v2, &merger) : v2 }
     self.merge(other_hash, &merger)
   end
 
diff --git a/lib/refrepo/input_loader.rb b/lib/refrepo/input_loader.rb
index 360737ade08156f45b0e2fb08382798e1d86dffe..b83ed326feeb7aed5e78956bfe925fe6b49496fb 100644
--- a/lib/refrepo/input_loader.rb
+++ b/lib/refrepo/input_loader.rb
@@ -26,7 +26,7 @@ def load_yaml_file_hierarchy(directory = File.expand_path("../../input/grid5000/
           file_hash = YAML::load_file(filename)
         end
       if not file_hash
-        raise Exception.new("loaded hash is empty")
+        raise StandardError.new("loaded hash is empty")
       end
       # YAML::Psych raises an exception if the file cannot be loaded.
       rescue StandardError => e
@@ -103,7 +103,7 @@ def add_node_pdu_mapping(h)
 
       # Get pdu information from node description in clusters/ hierachy
       pdu_attached_nodes = {}
-      site.fetch("clusters", []).sort.each do |cluster_uid, cluster|
+      site.fetch("clusters", []).sort.each do |_cluster_uid, cluster|
         cluster["nodes"].each do |node_uid, node|# _sort_by_node_uid
           next if node['status'] == "retired"
           node.fetch('pdu', []).each do |node_pdu|
@@ -145,7 +145,7 @@ def add_wattmetre_mapping(h)
       if pdu_uid.include?("wattmetrev3") # TODO: better way of identifying a V3 wattmetre
         wattmetre_modules = {}
         # Look for other PDUs where this wattmetre is used
-        site.fetch("pdus", []).each do |other_pdu_uid, other_pdu|
+        site.fetch("pdus", []).each do |_other_pdu_uid, other_pdu|
           other_pdu.fetch("ports", {}).each do |other_port_uid, other_port_data|
             next if not other_port_data.is_a?(Hash)
             next if other_port_data["wattmetre"] != pdu_uid
@@ -273,12 +273,12 @@ def add_default_values_and_mappings(h)
         end
 
         # Ensure that by_id is present (bug 11043)
-        node["storage_devices"].each do |key, hash|
+        node["storage_devices"].each do |_key, hash|
           hash['by_id'] = '' if not hash['by_id']
         end
 
         # Type conversion
-        node["network_adapters"].each { |key, hash| hash["rate"] = hash["rate"].to_i if hash["rate"].is_a?(Float) }
+        node["network_adapters"].each { |_key, hash| hash["rate"] = hash["rate"].to_i if hash["rate"].is_a?(Float) }
 
         # For each network adapters, populate "network_address", "switch" and "switch_port" from the network equipment description
         node["network_adapters"].each_pair { |device, network_adapter|
@@ -373,9 +373,9 @@ def net_switch_port_lookup(site, node_uid, interface='')
 end
 
 def add_switch_port(h)
-  h['sites'].each_pair do |site_uid, site|
+  h['sites'].each_pair do |_site_uid, site|
     used_ports = {}
-    site['clusters'].each_pair do |cluster_uid, hc|
+    site['clusters'].each_pair do |_cluster_uid, hc|
       hc['nodes'].each_pair do |node_uid, hn|
         next if hn['status'] == 'retired'
         hn['network_adapters'].each_pair do |iface_uid, iface|
@@ -394,8 +394,8 @@ def add_switch_port(h)
 end
 
 def detect_dead_nodes_with_yaml_files(h)
-  h['sites'].each_pair do |site_uid, hs|
-    hs['clusters'].each_pair do |cluster_uid, hc|
+  h['sites'].each_pair do |_site_uid, hs|
+    hs['clusters'].each_pair do |_cluster_uid, hc|
       hc['nodes'].each_pair do |node_uid, hn|
         if hn['status'] == 'retired'
           if (hn['processor']['model'] rescue nil)
@@ -413,7 +413,7 @@ def add_kavlan_ips(h)
   vlan_offset = h['vlans']['offsets'].split("\n").map { |l| l = l.split(/\s+/) ; [ l[0..3], l[4..-1].inject(0) { |a, b| (a << 8) + b.to_i } ] }.to_h
   h['sites'].each_pair do |site_uid, hs|
     # forget about allocated ips for local vlans, since we are starting a new site
-    allocated.delete_if { |k, v| v[3] == 'local' }
+    allocated.delete_if { |_k, v| v[3] == 'local' }
     hs['clusters'].each_pair do |cluster_uid, hc|
       next if !hc['kavlan'] # skip clusters where kavlan is globally set to false (used for initial cluster installation)
       hc['nodes'].each_pair do |node_uid, hn|
@@ -472,9 +472,9 @@ end
 
 def add_ipv6(h)
   # for each node
-  h['sites'].each_pair do |site_uid, hs|
+  h['sites'].each_pair do |_site_uid, hs|
     site_prefix = IPAddress hs['ipv6']['prefix']
-    hs['clusters'].each_pair do |cluster_uid, hc|
+    hs['clusters'].each_pair do |_cluster_uid, hc|
       hc['nodes'].each_pair do |node_uid, hn|
         ipv6_adapters = hn['network_adapters'].select { |_k,v| v['mountable'] and v['interface'] == 'Ethernet' }
         if ipv6_adapters.length > 0
@@ -578,9 +578,9 @@ end
 
 def add_software(h)
   # for each node
-  h['sites'].each_pair do |site_uid, hs|
-    hs['clusters'].each_pair do |cluster_uid, hc|
-      hc['nodes'].each_pair do |node_uid, hn|
+  h['sites'].each_pair do |_site_uid, hs|
+    hs['clusters'].each_pair do |_cluster_uid, hc|
+      hc['nodes'].each_pair do |_node_uid, hn|
         if not hn.key?('software')
           hn['software'] = {}
         end
@@ -593,14 +593,14 @@ end
 
 def add_network_metrics(h)
   # for each cluster
-  h['sites'].each_pair do |site_uid, site|
-    site['clusters'].each_pair do |cluster_uid, cluster|
+  h['sites'].each_pair do |_site_uid, site|
+    site['clusters'].each_pair do |_cluster_uid, cluster|
 
       # remove any network metrics defined in cluster
       cluster['metrics'] = cluster.fetch('metrics', []).reject {|m| m['name'] =~ /network_iface.*/}
 
       # for each interface of a cluster's node
-      _, node = cluster['nodes'].select { |k, v| v['status'] != 'retired' }.sort_by{ |k, v| k }.first
+      _, node = cluster['nodes'].select { |_k, v| v['status'] != 'retired' }.sort_by{ |k, _v| k }.first
       node["network_adapters"].each do |iface_uid, iface|
 
         # we only support metrics for Ethernet at this point
@@ -624,8 +624,8 @@ end
 
 def add_pdu_metrics(h)
   # for each cluster
-  h['sites'].each_pair do |site_uid, site|
-    site['clusters'].each_pair do |cluster_uid, cluster|
+  h['sites'].each_pair do |_site_uid, site|
+    site['clusters'].each_pair do |_cluster_uid, cluster|
 
       # remove any PDU metrics defined in cluster
       cluster['metrics'] = cluster.fetch('metrics', []).reject {|m| m['name'] =~ /(wattmetre_power_watt|pdu_outlet_power_watt)/ }
@@ -637,7 +637,7 @@ def add_pdu_metrics(h)
       if not cluster_wm.empty? \
       and cluster_wm.all?{|pdu| site['pdus'][pdu].fetch('metrics', []).any?{|m| m['name'] == 'wattmetre_power_watt'}}
 
-        metric = site['pdus'][cluster_wm.first].fetch('metrics', []).each{|m| m['name'] == 'wattmetre_power_watt'}.first
+        metric = site['pdus'][cluster_wm.first].fetch('metrics', []).select{|m| m['name'] == 'wattmetre_power_watt'}.first
         new_metric = metric.merge({'description' => "Power consumption of node reported by wattmetre, in watt"})
         cluster['metrics'].insert(0, new_metric)
       end
@@ -649,7 +649,7 @@ def add_pdu_metrics(h)
       if not cluster_pdus.empty? \
       and cluster_pdus.all?{|pdu| site['pdus'][pdu].fetch('metrics', []).any?{|m| m['name'] == 'wattmetre_power_watt'}}
 
-        metric = site['pdus'][cluster_pdus.first].fetch('metrics', []).each{|m| m['name'] == 'wattmetre_power_watt'}.first
+        metric = site['pdus'][cluster_pdus.first].fetch('metrics', []).select{|m| m['name'] == 'wattmetre_power_watt'}.first
         new_metric = metric.merge({'description' => "Power consumption of node reported by wattmetre, in watt"})
         cluster['metrics'].insert(0, new_metric)
       end
@@ -660,7 +660,7 @@ def add_pdu_metrics(h)
 
         # Metric is available for node only if a single PDU powers it
         if not cluster['nodes'].each_value.any?{|node| node.fetch('pdu', []).select{|p| not p.has_key?('kind') or p.fetch('kind', '') != 'wattmetre-only'}.map{|p| p['uid']}.uniq.length > 1}
-          metric = site['pdus'][cluster_pdus.first].fetch('metrics', []).each{|m| m['name'] == 'pdu_outlet_power_watt'}.first
+          metric = site['pdus'][cluster_pdus.first].fetch('metrics', []).select{|m| m['name'] == 'pdu_outlet_power_watt'}.first
           new_metric = metric.merge({'description' => "Power consumption of node reported by PDU, in watt"})
           new_metric['source'] = {"protocol" => "pdu"}
           cluster['metrics'].insert(0, new_metric)
@@ -699,9 +699,9 @@ def get_flops_per_cycle(microarch, cpu_name)
 end
 
 def add_theorical_flops(h)
-  h['sites'].each_pair do |site_uid, site|
-    site['clusters'].each_pair do |cluster_uid, cluster|
-      cluster['nodes'].select { |k, v| v['status'] != 'retired' }.each_pair do |node_uid, node|
+  h['sites'].each_pair do |_site_uid, site|
+    site['clusters'].each_pair do |_cluster_uid, cluster|
+      cluster['nodes'].select { |_k, v| v['status'] != 'retired' }.each_pair do |_node_uid, node|
         node['performance'] = {}
         node['performance']['core_flops'] =  node['processor']['clock_speed'].to_i * get_flops_per_cycle(node['processor']['microarchitecture'], node['processor']['other_description'])
         node['performance']['node_flops'] = node['architecture']['nb_cores'].to_i * node['performance']['core_flops'].to_i
@@ -711,9 +711,9 @@ def add_theorical_flops(h)
 end
 
 def add_management_tools(h)
-  h['sites'].each_pair do |site_uid, site|
-    site['clusters'].each_pair do |cluster_uid, cluster|
-      cluster['nodes'].select { |k, v| v['status'] != 'retired' }.each_pair do |node_uid, node|
+  h['sites'].each_pair do |_site_uid, site|
+    site['clusters'].each_pair do |_cluster_uid, cluster|
+      cluster['nodes'].select { |_k, v| v['status'] != 'retired' }.each_pair do |_node_uid, node|
         node['management_tools'] = h['management_tools'] if !node.has_key?('management_tools')
         h['management_tools'].each_key do |k|
           node['management_tools'][k] = h['management_tools'][k] if !node['management_tools'].has_key?(k)
@@ -725,7 +725,7 @@ end
 
 def add_site_ipv6_infos(h)
   global_prefix = IPAddress h['ipv6']['prefix']
-  h['sites'].each_pair do |site_uid, hs|
+  h['sites'].each_pair do |site_uid, _hs|
     site_prefix = IPAddress global_prefix.to_string
     # Site index is third group of nibbles, but on the MSB side
     site_prefix[3] = h['ipv6']['site_indexes'][site_uid] << 8
@@ -738,9 +738,9 @@ def add_site_ipv6_infos(h)
 end
 
 def add_compute_capability(h)
-  h['sites'].each_pair do |site_uid, site|
-    site['clusters'].each_pair do |cluster_uid, cluster|
-      cluster['nodes'].select { |k, v| v['status'] != 'retired' }.each_pair do |node_uid, node|
+  h['sites'].each_pair do |_site_uid, site|
+    site['clusters'].each_pair do |_cluster_uid, cluster|
+      cluster['nodes'].select { |_k, v| v['status'] != 'retired' }.each_pair do |_node_uid, node|
         if node['gpu_devices']
           node['gpu_devices'].select { |_, v| v['vendor'] == 'Nvidia' }.each do |_, v|
             v['compute_capability'] = GPURef.get_compute_capability(v['model'])
@@ -800,7 +800,7 @@ def complete_network_equipments(h)
     complete_one_network_equipment(network_uid, network)
   end
 
-  h['sites'].each do |site_uid, site|
+  h['sites'].each do |_site_uid, site|
     site.fetch('networks', []).each do |network_uid, network|
       complete_one_network_equipment(network_uid, network)
     end
diff --git a/lib/refrepo/utils.rb b/lib/refrepo/utils.rb
index aca21c31cca7aab497766788f111edbebdfc190c..ebd8e571c073170369fcb19a76fe1a09d57d368e 100644
--- a/lib/refrepo/utils.rb
+++ b/lib/refrepo/utils.rb
@@ -27,7 +27,7 @@ end
 # Various monkey patches
 class Hash
   def slice(*extract)
-    h2 = self.select{|key, value| extract.include?(key) }
+    h2 = self.select{|key, _value| extract.include?(key) }
     h2
   end
 end
diff --git a/lib/refrepo/valid/input/duplicates.rb b/lib/refrepo/valid/input/duplicates.rb
index 48f2763dc27bf91758e5cb248259d6c956a95e9b..f2d45523569c4a46cc4ffc45b89a2f40e78442e9 100644
--- a/lib/refrepo/valid/input/duplicates.rb
+++ b/lib/refrepo/valid/input/duplicates.rb
@@ -55,14 +55,14 @@ def yaml_input_find_duplicates(options)
   end
 
 # remove entries that are not duplicate
-  refapi_hash.deep_reject! {|k, v| !(
+  refapi_hash.deep_reject! {|_k, v| !(
   (v.is_a?(Hash) && !v.empty?) ||
       v.is_a?(String) && v.start_with?('!duplicated:')
   )}
 
 # remove ip, mac and mounted properties (as it can be use to bootstrap the installation of a cluster)
 #  refapi_hash.deep_reject! {|k, v| k == 'ip' || k == 'mac' || k == 'mounted'}
-  refapi_hash.deep_reject! {|k, v| v == {}}
+  refapi_hash.deep_reject! {|_k, v| v == {}}
 
   if refapi_hash.empty?
     puts "OK: no duplicate entries."
diff --git a/lib/refrepo/valid/input/lib/array_validator.rb b/lib/refrepo/valid/input/lib/array_validator.rb
index de9ddf3c3b064ddcc465f88514119a48bfe68298..2c75bbf3e5f9375a041c1ef176bdf729a392b75a 100644
--- a/lib/refrepo/valid/input/lib/array_validator.rb
+++ b/lib/refrepo/valid/input/lib/array_validator.rb
@@ -44,7 +44,7 @@ class HashValidator::Validator::ArrayValidator < HashValidator::Validator::Base
     end
 
     # Cleanup errors (remove any empty nested errors)
-    errors.delete_if { |k,v| v.empty? }
+    errors.delete_if { |_k,v| v.empty? }
   end
 end
 
@@ -74,7 +74,7 @@ class HashValidator::Validator::NestedArrayValidator < HashValidator::Validator:
     end
 
     # Cleanup errors (remove any empty nested errors)
-    errors.delete_if { |k,v| v.empty? }
+    errors.delete_if { |_k,v| v.empty? }
   end
 end
 
diff --git a/lib/refrepo/valid/input/lib/custom_validators.rb b/lib/refrepo/valid/input/lib/custom_validators.rb
index 4ee0aae4d6a76a4c9329e6839d486fcc9e8b061a..9c9e79ed6ace4382c02d4c30843578275e38bb13 100644
--- a/lib/refrepo/valid/input/lib/custom_validators.rb
+++ b/lib/refrepo/valid/input/lib/custom_validators.rb
@@ -10,7 +10,7 @@ class HashValidator::Validator::LinecardPortValidator < HashValidator::Validator
 
   def validate(key, values, _validations, errors)
     if values.is_a?(Hash)
-      values.each do |k, v|
+      values.each do |k, _v|
         if @port_properties.index(k) == nil
           errors[key] = "unexpected key '#{k}'."
         end
diff --git a/lib/refrepo/valid/input/lib/multihash_validator.rb b/lib/refrepo/valid/input/lib/multihash_validator.rb
index dcbb0615a60facf2eec07fd7b9509d7fab426e9f..b563e676d4b99caa9cfe0cd0014cc2fcd0ba18bb 100644
--- a/lib/refrepo/valid/input/lib/multihash_validator.rb
+++ b/lib/refrepo/valid/input/lib/multihash_validator.rb
@@ -36,7 +36,7 @@ class HashValidator::Validator::MultiHashValidator < HashValidator::Validator::B
     end
 
     # Cleanup errors (remove any empty nested errors)
-    errors.delete_if { |k,v| v.empty? }
+    errors.delete_if { |_k,v| v.empty? }
   end
 end
 
diff --git a/lib/refrepo/valid/input/schema.rb b/lib/refrepo/valid/input/schema.rb
index e1d504ec3a9909a7c16e742daebe86dfda7553a2..0be09bf5bc523eb0457dcdfbf780f581ca41ddd3 100644
--- a/lib/refrepo/valid/input/schema.rb
+++ b/lib/refrepo/valid/input/schema.rb
@@ -29,7 +29,7 @@ def yaml_input_schema_validator(options)
 
     r &= run_validator(site_uid, site, schema_site) #
 
-    site['networks'].each do |network_equipment_uid, network_equipment|
+    site['networks'].each do |_network_equipment_uid, network_equipment|
       r &= run_validator(site_uid, network_equipment, schema_network_equipments)
     end
 
diff --git a/lib/refrepo/valid/network.rb b/lib/refrepo/valid/network.rb
index a5a0cf7a2ef88cca73986773b54e92f5af319649..1926a285f8a855a8fe3d8ad1c45160f1a3c08506 100644
--- a/lib/refrepo/valid/network.rb
+++ b/lib/refrepo/valid/network.rb
@@ -103,8 +103,8 @@ def check_network_description(options)
         #puts "This is an HPC switch. ERRORs will be non-fatal."
         oldok = ok
       end
-      eq['linecards'].each_with_index do |lc, lc_i|
-        (lc['ports'] || []).each_with_index do |port, port_i|
+      eq['linecards'].each_with_index do |lc, _lc_i|
+        (lc['ports'] || []).each_with_index do |port, _port_i|
           # skip if empty port
           next if port == {}
 
@@ -370,7 +370,7 @@ def generate_dot(netnodes, links, site)
       end
     else
       # only one interface
-      l[0][1].each_pair do |iface, target|
+      l[0][1].each_pair do |_iface, target|
         r = "#{target['rate'] / 10**9}G"
         content << "\"#{target['switch']}\" -- \"#{l[1]}\" [label=\"#{r}\",len=2.0];"
       end
diff --git a/scripts/edit-input-files.rb b/scripts/edit-input-files.rb
index 3ec860f5fca517e6bc06b11d5a10b22781315e21..a567b1568e6a8ffc96c2104fb5bf8139cf09b0b8 100755
--- a/scripts/edit-input-files.rb
+++ b/scripts/edit-input-files.rb
@@ -41,7 +41,7 @@ OptionParser.new do |opts|
     options[:clusters] = s
   end
 
-  opts.on('--clean', 'Remove content from output/ after execution') do |s|
+  opts.on('--clean', 'Remove content from output/ after execution') do |_s|
     options[:rm_output] = true
   end
 
diff --git a/scripts/normalize_input.rb b/scripts/normalize_input.rb
index b2a7fb6212dc64a329603a52e56909e2aa76d5e5..6d11f4b8ffdbad190e85da869d52950772f3c36a 100755
--- a/scripts/normalize_input.rb
+++ b/scripts/normalize_input.rb
@@ -10,7 +10,7 @@ Dir['input/grid5000/sites/*/clusters/*/nodes/*.yaml'].each do |f|
   #d.values.first['bios'].delete('configuration')
 
   # remove rate when it's 0
-  d.values.first['network_adapters'].each_pair do |name, na|
+  d.values.first['network_adapters'].each_pair do |_name, na|
     next if not na['rate']
     next if na['rate'] != 0
     na.delete('rate')
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index f78eccf6d2ab6bbebf3493e5fb0c7f31cf7aec94..eef6fc8742f0cd4d21cd6a7bc1e2e41dd0c4d260 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -74,9 +74,9 @@ end
 
 def gen_stub(file, site, cluster, node_count = 9999)
   data = load_data_hierarchy
-  data['sites'].delete_if { |k, v| site != k }
-  data['sites'].each_pair do |site_uid, s|
-    s['clusters'].delete_if { |k, v| cluster != k }
+  data['sites'].delete_if { |k, _v| site != k }
+  data['sites'].each_pair do |_site_uid, s|
+    s['clusters'].delete_if { |k, _v| cluster != k }
   end
   data.delete('network_equipments')
   data['sites']['fakesite'] = data['sites'][site]