Commit adb4f428 authored by Florent Didier

[dev] wiki_generators: sort every .each iteration except those used only for display (mw_utils.rb)

parent 354a258f
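
The pattern repeated throughout this commit replaces plain Hash#each with sort.to_h.each, so generated wiki content comes out in key-sorted order instead of whatever insertion order the YAML loader happened to produce. A minimal standalone sketch of the difference (invented data, not from the repository):

  # A Ruby Hash iterates in insertion order, so output depends on
  # how the input files happened to be read:
  sites = { "rennes" => 2, "grenoble" => 1, "lyon" => 3 }
  sites.each { |uid, _n| puts uid }           # rennes, grenoble, lyon

  # Hash#sort yields [key, value] pairs ordered by key; to_h rebuilds
  # a hash in that order, making iteration alphabetical and stable:
  sites.sort.to_h.each { |uid, _n| puts uid } # grenoble, lyon, rennes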
@@ -15,8 +15,8 @@ class CPUParametersGenerator < WikiGenerator
     global_hash = load_yaml_file_hierarchy(File.expand_path("../../input/grid5000/", File.dirname(__FILE__)))
     # Loop over Grid'5000 sites
-    global_hash["sites"].each { |site_uid, site_hash|
-      site_hash.fetch("clusters").each { |cluster_uid, cluster_hash|
+    global_hash["sites"].sort.to_h.each { |site_uid, site_hash|
+      site_hash.fetch("clusters").sort.to_h.each { |cluster_uid, cluster_hash|
         node_hash = cluster_hash.fetch('nodes').first[1]
...
@@ -15,17 +15,17 @@ class DiskReservationGenerator < WikiGenerator
     global_hash = load_yaml_file_hierarchy(File.expand_path("../../input/grid5000/", File.dirname(__FILE__)))
     # Loop over Grid'5000 sites
-    global_hash["sites"].each { |site_uid, site_hash|
-      site_hash.fetch("clusters").each { |cluster_uid, cluster_hash|
+    global_hash["sites"].sort.to_h.each { |site_uid, site_hash|
+      site_hash.fetch("clusters").sort.to_h.each { |cluster_uid, cluster_hash|
         disk_info = {}
-        cluster_hash.fetch('nodes').sort.each { |node_uid, node_hash|
+        cluster_hash.fetch('nodes').sort.to_h.each { |node_uid, node_hash|
           next if node_hash['status'] == 'retired'
           reservable_disks = node_hash['storage_devices'].select{ |k, v| v['reservation'] == true }.count
           add(disk_info, node_uid, reservable_disks)
         }
         # One line for each group of nodes with the same number of reservable disks
-        disk_info.each { |num, reservable_disks|
+        disk_info.sort.to_h.each { |num, reservable_disks|
           table_data << [
             "[[#{site_uid.capitalize}:Hardware|#{site_uid.capitalize}]]",
             "[https://public-api.grid5000.fr/stable/sites/#{site_uid}/clusters/#{cluster_uid}/nodes.json?pretty=1 #{cluster_uid}" + (disk_info.size== 1 ? '' : '-' + G5K.nodeset(num)) + "]",
...
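
In the hunk above, each node's reservable disks are counted by filtering its storage_devices hash; add is a helper from this repository that groups node uids by that count. A self-contained sketch of the filtering step, with an illustrative (not real) storage_devices layout:

  storage_devices = {
    "sda" => { "reservation" => false },
    "sdb" => { "reservation" => true },
    "sdc" => { "reservation" => true },
  }
  # Hash#select keeps the pairs whose block returns true; count the survivors.
  storage_devices.select { |_dev, v| v["reservation"] == true }.count  # => 2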
@@ -36,9 +36,9 @@ class G5KHardwareGenerator < WikiGenerator
       'node_models' => {}
     }
-    @global_hash['sites'].sort.each { |site_uid, site_hash|
-      site_hash['clusters'].sort.each { |cluster_uid, cluster_hash|
-        cluster_hash['nodes'].sort.each { |node_uid, node_hash|
+    @global_hash['sites'].sort.to_h.each { |site_uid, site_hash|
+      site_hash['clusters'].sort.to_h.each { |cluster_uid, cluster_hash|
+        cluster_hash['nodes'].sort.to_h.each { |node_uid, node_hash|
           next if node_hash['status'] == 'retired'
           @node = node_uid
@@ -74,7 +74,7 @@ class G5KHardwareGenerator < WikiGenerator
         .uniq
       net_interconnects = interfaces.inject(Hash.new(0)){ |h, v| h[v] += 1; h }
-      net_interconnects.each { |k, v|
+      net_interconnects.sort_by { |k, v| k.first[:sort] }.each { |k, v|
         init(data, 'net_interconnects', k)
         data['net_interconnects'][k][site_uid] += v
       }
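
This loop cannot use the plain sort.to_h treatment: net_interconnects is keyed by arrays of hashes, and hashes are not Comparable, so the commit sorts by the :sort rank embedded in each key instead. (The inject(Hash.new(0)) line just above tallies how many times each interconnect occurs.) A sketch with invented keys:

  net_interconnects = {
    [{ name: "10G Ethernet", sort: 2 }] => 5,
    [{ name: "1G Ethernet",  sort: 1 }] => 8,
  }
  # k is the array-of-hashes key; k.first[:sort] reads its sort rank.
  net_interconnects.sort_by { |k, _v| k.first[:sort] }.each do |k, v|
    puts "#{k.first[:name]}: #{v}"   # 1G Ethernet first, then 10G
  end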
@@ -87,7 +87,7 @@ class G5KHardwareGenerator < WikiGenerator
       gpu_families[[g['gpu_vendor']]] = g['gpu_count'] if g and g['gpu']
       mic_families = {}
       mic_families[[m['mic_vendor']]] = m['mic_count'] if m and m['mic']
-      gpu_families.merge(mic_families).each { |k, v|
+      gpu_families.merge(mic_families).sort.to_h.each { |k, v|
         init(data, 'acc_families', k)
         data['acc_families'][k][site_uid] += v
       }
@@ -97,7 +97,7 @@ class G5KHardwareGenerator < WikiGenerator
       mic_details = {}
       mic_details[["#{m['mic_vendor']} #{m['mic_model']}"]] = [m['mic_count'], m['mic_cores']] if m and m['mic']
-      gpu_details.merge(mic_details).each { |k, v|
+      gpu_details.merge(mic_details).sort.to_h.each { |k, v|
         init(data, 'acc_models', k)
         data['acc_models'][k][site_uid] += v[0]
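
Both accelerator hunks merge the GPU and MIC hashes first and sort afterwards, so a single alphabetical pass covers both accelerator kinds. Sketch with invented vendor counts:

  gpu_families = { ["Nvidia"] => 4 }
  mic_families = { ["Intel"] => 2 }
  # merge combines the two hashes; sort.to_h then orders entries by key,
  # regardless of which source hash each entry came from.
  gpu_families.merge(mic_families).sort.to_h.each do |k, v|
    puts "#{k.first}: #{v}"   # Intel: 2, then Nvidia: 4
  end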
@@ -211,10 +211,10 @@ class G5KHardwareGenerator < WikiGenerator
   def generate_interfaces
     table_data = []
-    @global_hash["sites"].each { |site_uid, site_hash|
-      site_hash.fetch("clusters").each { |cluster_uid, cluster_hash|
+    @global_hash["sites"].sort.to_h.each { |site_uid, site_hash|
+      site_hash.fetch("clusters").sort.to_h.each { |cluster_uid, cluster_hash|
         network_interfaces = {}
-        cluster_hash.fetch('nodes').sort.each { |node_uid, node_hash|
+        cluster_hash.fetch('nodes').sort.to_h.each { |node_uid, node_hash|
           next if node_hash['status'] == 'retired'
           if node_hash['network_adapters']
             node_interfaces = node_hash['network_adapters'].select{ |k, v| v['interface'] == 'Ethernet' and v['enabled'] == true and (v['mounted'] == true or v['mountable'] == true) and v['management'] == false }
...
@@ -145,8 +145,8 @@ class OarPropertiesGenerator < WikiGenerator
   def get_nodes_properties(site_uid, site)
     properties = {}
-    site['clusters'].each do |cluster_uid, cluster|
-      cluster['nodes'].each do |node_uid, node|
+    site['clusters'].sort.to_h.each do |cluster_uid, cluster|
+      cluster['nodes'].sort.to_h.each do |node_uid, node|
         begin
           properties[node_uid] = get_ref_node_properties_internal(cluster_uid, cluster, node_uid, node)
         rescue MissingProperty => e
@@ -182,9 +182,9 @@ class OarPropertiesGenerator < WikiGenerator
     #Compiled properties used to generate page
     oar_properties = {}
-    props.each { |site, site_props|
-      site_props.each { |node_uid, node_props|
-        node_props.each { |property, value|
+    props.sort.to_h.each { |site, site_props|
+      site_props.sort.to_h.each { |node_uid, node_props|
+        node_props.sort.to_h.each { |property, value|
           next if @@ignored_properties.include?(property)
           oar_properties[property] ||= {}
@@ -198,7 +198,7 @@ class OarPropertiesGenerator < WikiGenerator
       }
     }
-    oar_properties.each { |prop, prop_hash|
+    oar_properties.sort.to_h.each { |prop, prop_hash|
       if (prop_hash["values"].length > 20)
         #Limit possible values to 20 elements and mark the list as truncated
         prop_hash["values"].slice!(0...-20)
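
The truncation in this hunk relies on Array#slice! with a range: slice!(0...-20) destructively removes every element except the last 20. A quick standalone check:

  values = (1..25).to_a
  # Drop indices 0 up to (but not including) the 20th-from-last element.
  values.slice!(0...-20)
  values.length  # => 20
  values.first   # => 6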
@@ -210,7 +210,7 @@ class OarPropertiesGenerator < WikiGenerator
     @generated_content = "Properties on resources managed by OAR allow users to select them according to their experiment's characteristics." + MW::LINE_FEED
     @generated_content += MW::heading("OAR Properties", 1) + MW::LINE_FEED
-    @@categories.each { |cat, cat_properties|
+    @@categories.sort.to_h.each { |cat, cat_properties|
       @generated_content += MW::heading(cat, 2) + MW::LINE_FEED
       cat_properties.each{ |property|
         values = oar_properties[property]["values"] rescue []
...
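
One unchanged line worth noting: values = oar_properties[property]["values"] rescue [] uses Ruby's modifier rescue to fall back to an empty array when a property has no recorded values (indexing nil raises NoMethodError). Minimal illustration with a made-up property hash:

  oar_properties = { "besteffort" => { "values" => ["YES", "NO"] } }
  # oar_properties["missing"] is nil; nil["values"] raises NoMethodError,
  # which the modifier rescue swallows, yielding the [] fallback.
  values = oar_properties["missing"]["values"] rescue []
  values  # => []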