Mentions légales du service

Skip to content
Snippets Groups Projects

Bug12144 positionnement des propriétés sur les ressources disk comme sur les ressources default

Merged LOUP David requested to merge bug12144 into master
1 unresolved thread
Files
9
@@ -369,7 +369,7 @@ def get_ref_disk_properties(site_uid, site)
@@ -369,7 +369,7 @@ def get_ref_disk_properties(site_uid, site)
site['clusters'].each do |cluster_uid, cluster|
site['clusters'].each do |cluster_uid, cluster|
cluster['nodes'].each do |node_uid, node|
cluster['nodes'].each do |node_uid, node|
begin
begin
properties.merge!(get_ref_disk_properties_internal(site_uid, cluster_uid, node_uid, node))
properties.merge!(get_ref_disk_properties_internal(site_uid, cluster_uid, cluster, node_uid, node))
rescue MissingProperty => e
rescue MissingProperty => e
puts "Error while processing node #{node_uid}: #{e}"
puts "Error while processing node #{node_uid}: #{e}"
end
end
@@ -576,24 +576,28 @@ end
@@ -576,24 +576,28 @@ end
# "diskpath"=>"/dev/disk/by-path/pci-0000:02:00.0-scsi-0:0:1:0",
# "diskpath"=>"/dev/disk/by-path/pci-0000:02:00.0-scsi-0:0:1:0",
# "cpuset"=>-1},
# "cpuset"=>-1},
# ["grimoire-1", "sdc.grimoire-1"]=> ...
# ["grimoire-1", "sdc.grimoire-1"]=> ...
def get_ref_disk_properties_internal(site_uid, cluster_uid, node_uid, node)
def get_ref_disk_properties_internal(site_uid, cluster_uid, cluster, node_uid, node)
properties = {}
properties = {}
node['storage_devices'].each_with_index do |device, index|
node['storage_devices'].each_with_index do |device, index|
disk = [device['device'], node_uid].join('.')
disk = [device['device'], node_uid].join('.')
if index > 0 && device['reservation'] # index > 0 is used to exclude sda
if index > 0 && device['reservation'] # index > 0 is used to exclude sda
key = [node_uid, disk]
key = [node_uid, disk]
h = {}
# Start by copying the properties of the resource of type default,
node_address = [node_uid, site_uid, 'grid5000.fr'].join('.')
# because to reserve both the resources of type disk and of type default
h['cluster'] = cluster_uid
# for a same host, almost all properties must have the same value.
h['host'] = node_address
# In particular, max_walltime must be identical, but mind that the
 
# max_walltime property is not related to the maximum duration of a job
 
# containing only disk resources (14 days); it is not used for that.
 
h = get_ref_node_properties_internal(cluster_uid, cluster, node_uid, node)
 
# We set the host property because it's not done in what we got above
 
h['host'] = [node_uid, site_uid, 'grid5000.fr'].join('.')
 
# We do not set network_address and available_upto for disk resources
 
# (FIXME: recall why)
h['network_address'] = ''
h['network_address'] = ''
h['available_upto'] = 0
h['available_upto'] = 0
h['deploy'] = 'YES'
# We set the disk specific property values
h['production'] = get_production_property(node)
h['maintenance'] = get_maintenance_property(node)
h['disk'] = disk
h['disk'] = disk
h['diskpath'] = device['by_path']
h['diskpath'] = device['by_path']
h['exotic'] = node['exotic'] ? 'YES' : 'NO'
h['cpuset'] = -1
h['cpuset'] = -1
properties[key] = h
properties[key] = h
end
end
@@ -685,13 +689,12 @@ def diff_properties(type, properties_oar, properties_ref)
@@ -685,13 +689,12 @@ def diff_properties(type, properties_oar, properties_ref)
if type == 'default'
if type == 'default'
ignore_keys = ignore_keys()
ignore_keys = ignore_keys()
ignore_keys.each { |key| properties_oar.delete(key) }
ignore_keys.each { |key| properties_ref.delete(key) }
elsif type == 'disk'
elsif type == 'disk'
check_keys = %w(cluster host network_address available_upto deploy production maintenance disk diskpath cpuset)
check_keys = %w(cluster host network_address available_upto deploy production maintenance disk diskpath cpuset)
properties_oar.select! { |k, _v| check_keys.include?(k) }
ignore_keys = ignore_keys() - check_keys # Some keys must be ignored for default resources but not for disk resources, e.g. available_upto
properties_ref.select! { |k, _v| check_keys.include?(k) }
end
end
 
ignore_keys.each { |key| properties_oar.delete(key) }
 
ignore_keys.each { |key| properties_ref.delete(key) }
# Ignore the 'state' property only if the node is not 'Dead' according to
# Ignore the 'state' property only if the node is not 'Dead' according to
# the reference-repo.
# the reference-repo.
@@ -998,7 +1001,7 @@ def do_diff(options, generated_hierarchy, refrepo_properties)
@@ -998,7 +1001,7 @@ def do_diff(options, generated_hierarchy, refrepo_properties)
end
end
end
end
diagnostic_msgs.push( "Properties that need to be created on the #{site_uid} server: #{properties_keys['diff'][site_uid].keys.to_a.delete_if { |e| ignore_default_keys.include?(e) }.join(', ')}") if options[:verbose] && properties_keys['diff'][site_uid].keys.to_a.delete_if { |e| ignore_default_keys.include?(e) }.size > 0
diagnostic_msgs.push( "Properties that need to be created on the #{site_uid} server: #{properties_keys['diff'][site_uid].keys.to_a.delete_if { |e| ignore_keys.include?(e) }.join(', ')}") if options[:verbose] && properties_keys['diff'][site_uid].keys.to_a.delete_if { |e| ignore_keys.include?(e) }.size > 0
# Detect unknown properties
# Detect unknown properties
unknown_properties = properties_keys['oar'][site_uid].keys.to_set - properties_keys['ref'][site_uid].keys.to_set
unknown_properties = properties_keys['oar'][site_uid].keys.to_set - properties_keys['ref'][site_uid].keys.to_set
Loading