Commit 97182471 authored by Benjamin Dufoyer

Adding FDistributedGroupTreeBuilder: add a redistribute_leafs function that redistributes the leaves of the distributed linear tree according to the block size.
parent d1cb64fa
/**
 * Functions to fill a distributed group tree.
 * @author benjamin.dufoyer@inria.fr
 */
#ifndef _FDISTRIBUTED_GROUPTREE_BUILDER_HPP_
#define _FDISTRIBUTED_GROUPTREE_BUILDER_HPP_
#include "FGroupTree.hpp"
#include <cmath>
/**
 * fill_new_linear_tree fills the new linear tree with the leaves of the
 * current linear tree that stay on this process.
 * @author benjamin.dufoyer@inria.fr
 * @param source              old linear tree
 * @param destination         new linear tree
 * @param nb_leaf_recev_left  number of leaves received from the left neighbour
 * @param nb_leaf_recev_right number of leaves received from the right neighbour
 * @param nb_leaf_send_left   number of leaves sent to the left neighbour
 * @param nb_leaf_send_right  number of leaves sent to the right neighbour
 */
template<class node_t>
void fill_new_linear_tree(
    std::vector<node_t>* source,
    std::vector<node_t>* destination,
    unsigned nb_leaf_recev_left,
    unsigned nb_leaf_recev_right,
    unsigned nb_leaf_send_left,
    unsigned nb_leaf_send_right
){
    // Leaves sent to a neighbour are skipped in the source and leaves
    // received from a neighbour are skipped in the destination: only the
    // segment kept by this process is copied.
    unsigned min_copy = nb_leaf_send_left;
    unsigned max_copy = (unsigned)source->size() - nb_leaf_send_right;
    unsigned min_destination = nb_leaf_recev_left;
    unsigned max_destination = (unsigned)destination->size() - nb_leaf_recev_right;
    while(min_copy < max_copy && min_destination < max_destination){
        destination->at(min_destination) = source->at(min_copy);
        min_copy++;
        min_destination++;
    }
}
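// A minimal sketch of how fill_new_linear_tree lines up the copy windows,
// using made-up numbers (not taken from the code above): suppose a process
// holds 10 leaves, sends 2 to its left neighbour and 3 to its right one,
// and receives 4 from the left and 1 from the right.
//
//   std::vector<node_t> old_tree(10);
//   std::vector<node_t> new_tree(10 - 2 - 3 + 4 + 1);   // 10 leaves after the exchange
//   fill_new_linear_tree(&old_tree, &new_tree,
//                        /*nb_leaf_recev_left=*/4, /*nb_leaf_recev_right=*/1,
//                        /*nb_leaf_send_left=*/2,  /*nb_leaf_send_right=*/3);
//   // old_tree[2..6] (the 5 kept leaves) end up in new_tree[4..8];
//   // new_tree[0..3] and new_tree[9] are filled later by the MPI receives.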
/**
 * redistribute_leafs redistributes the leaves of a distributed linear tree
 * over the processes according to the group size.
 * @author benjamin.dufoyer@inria.fr
 * @param conf        MPI configuration
 * @param linear_tree current distributed linear tree
 * @param group_size  size of a group of leaves
 */
template<class node_t>
void redistribute_leafs(const inria::mpi_config& conf, std::vector<node_t>& linear_tree, const int& group_size){
    int nb_local_leaf = (int)linear_tree.size();
    const int nb_proc = conf.comm.size();
    // Number of leaves currently held by every process
    std::vector<int> array_global_nb_leaf(nb_proc);
    const int my_rank = conf.comm.rank();
    // Gather the local number of leaves of every process
    conf.comm.allgather(&nb_local_leaf,
                        1,
                        MPI_INT,
                        array_global_nb_leaf.data(),
                        1,
                        MPI_INT);
    // Compute the total number of leaves and the number of leaves every
    // process (except the last one) should hold after redistribution
    int nb_global_leaf = 0;
    for(int i = 0 ; i < nb_proc ; i++)
        nb_global_leaf += array_global_nb_leaf[i];
    int nb_global_group = nb_global_leaf / group_size;
    int nb_local_group = nb_global_group / nb_proc;
    int nb_leaf_needed = nb_local_group * group_size;
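    // A small worked example with made-up numbers (not taken from any run):
    // with nb_global_leaf = 1000, group_size = 64 and nb_proc = 4 we get
    // nb_global_group = 15, nb_local_group = 3 and nb_leaf_needed = 192,
    // so ranks 0..2 each end up with 192 leaves and the last rank, which
    // has no right neighbour, keeps the remaining 424 leaves.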
    // Description of one exchange with a neighbour process
    struct message_info{
        int process_rank;
        int nb_leaf;
    };
    // We store the future interactions in two vectors
    std::vector<message_info> interaction_send;
    std::vector<message_info> interaction_recev;
    // Numbers of leaves sent to and received from each side,
    // used later to fill the new linear_tree
    int nb_leaf_recev_left = 0;
    int nb_leaf_recev_right = 0;
    int nb_leaf_send_right = 0;
    int nb_leaf_send_left = 0;
    // COMPUTE FOR THE LEFT PROCESS
    // Check whether the current process needs to exchange leaves with its
    // left neighbour. The computation goes from left to right because the
    // last process is the one without a fixed number of leaves.
    if(my_rank != 0){ // The first process has nothing on its left
        // Prefix sums: array_global_nb_leaf[i] becomes the number of leaves
        // held by processes 0..i
        for(int i = 1 ; i < my_rank ; i++ ){
            array_global_nb_leaf[i] += array_global_nb_leaf[i-1];
        }
        // Check whether the left processes miss leaves or have too many.
        // If the number of leaves on the left is exactly a multiple of
        // nb_leaf_needed, no communication is needed.
        int nb_leaf_to_send = array_global_nb_leaf[my_rank-1] - (nb_leaf_needed * my_rank);
        if(nb_leaf_to_send != 0 ){
            message_info mess;
            mess.process_rank = my_rank-1;
            // Check if the left processes have too many leaves
            if(nb_leaf_to_send > 0){
                mess.nb_leaf = nb_leaf_to_send;
                interaction_recev.push_back(mess);
                // Update the global array with the future value of the current proc
                array_global_nb_leaf[my_rank] += nb_leaf_to_send;
                nb_leaf_recev_left += nb_leaf_to_send;
            } else {
                // Otherwise the left processes miss leaves to build their groups
                nb_leaf_to_send = -nb_leaf_to_send;
                mess.nb_leaf = nb_leaf_to_send;
                interaction_send.push_back(mess);
                // Update the global array with the future value of the current proc
                array_global_nb_leaf[my_rank] -= nb_leaf_to_send;
                nb_leaf_send_left += nb_leaf_to_send;
            }
        }
    }
    // COMPUTE FOR THE RIGHT PROCESS
    // Every process computes its number of leaves after the exchange with
    // its left neighbour
    int nb_leaf_to_send = array_global_nb_leaf[my_rank] - nb_leaf_needed;
    // The last process has no process on its right
    if( (my_rank+1) != nb_proc) {
        if(nb_leaf_to_send != 0){
            message_info mess;
            mess.process_rank = my_rank+1;
            // Check if the current process misses leaves
            if(nb_leaf_to_send < 0){
                nb_leaf_to_send = -nb_leaf_to_send;
                mess.nb_leaf = nb_leaf_to_send;
                interaction_recev.push_back(mess);
                // Update the global array with the future value of the current proc
                array_global_nb_leaf[my_rank] += nb_leaf_to_send;
                nb_leaf_recev_right += nb_leaf_to_send;
            } else {
                // Otherwise the current process has too many leaves and sends
                // the surplus to its right neighbour
                mess.nb_leaf = nb_leaf_to_send;
                interaction_send.push_back(mess);
                // Update the global array with the future value of the current proc
                array_global_nb_leaf[my_rank] -= nb_leaf_to_send;
                nb_leaf_send_right += nb_leaf_to_send;
            }
        }
    }
    // Now the two vectors hold every interaction with the other processes:
    // first we post every receive, then we post every send.
    // Declare the new linear_tree with its new number of leaves
    std::vector<node_t> new_linear_tree(array_global_nb_leaf[my_rank]);
    // Post the receive requests
    // Requests kept to wait for the receives to complete
    std::vector<inria::mpi::request> tab_mpi_status(interaction_recev.size());
    for(unsigned i = 0 ; i < interaction_recev.size(); i++ ){
        // Size of the buffer in bytes
        int size_recev = (int) (sizeof(node_t)*interaction_recev.at(i).nb_leaf);
        // Compute where to write the result: leaves coming from the left go
        // at the front of the new tree, leaves coming from the right at the back
        unsigned start = 0;
        if(my_rank < interaction_recev.at(i).process_rank){
            start = (unsigned)new_linear_tree.size() - interaction_recev.at(i).nb_leaf;
        }
        // Post the receive request
        tab_mpi_status[i] =
            conf.comm.irecv(&new_linear_tree.at(start),
                            size_recev,
                            MPI_CHAR,
                            interaction_recev.at(i).process_rank,1);
    }
    // Post the send requests
    for(unsigned i = 0 ; i < (unsigned)interaction_send.size(); i++ ){
        // Size of the buffer in bytes
        int size_send = (int)(sizeof(node_t)*interaction_send.at(i).nb_leaf);
        // Compute where the leaves to send start: leaves for the left
        // neighbour are at the front of the old tree, leaves for the right
        // neighbour at the back
        unsigned start = 0;
        if(my_rank < interaction_send.at(i).process_rank){
            start = (unsigned)linear_tree.size() - interaction_send.at(i).nb_leaf;
        }
        // Send the leaves to the neighbour process
        conf.comm.isend(&linear_tree.at(start),
                        size_send,
                        MPI_CHAR,
                        interaction_send.at(i).process_rank,1);
    }
    // Fill the new vector with the leaves kept from the old linear_tree.
    // The function needs to know how many leaves the current process sends
    // to the right and to the left because the MPI requests write into the
    // same buffer
    fill_new_linear_tree(&linear_tree,&new_linear_tree,nb_leaf_recev_left,nb_leaf_recev_right,nb_leaf_send_left,nb_leaf_send_right);
    // Wait for the end of the MPI receive requests
    inria::mpi::request::waitall(interaction_recev.size(),tab_mpi_status.data());
    // Swap the old linear_tree with the new one
    linear_tree.swap(new_linear_tree);
}
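// A minimal usage sketch (assumptions: a node type produced by the linear
// tree construction and an already-initialised inria::mpi_config named
// "conf"; the names my_node_t and build_linear_tree below are hypothetical
// and not part of this file):
//
//   std::vector<my_node_t> linear_tree = build_linear_tree(/*...*/);
//   const int group_size = 256;   // number of leaves per group
//   redistribute_leafs(conf, linear_tree, group_size);
//   // After the call, every process except the last one holds a multiple
//   // of group_size leaves, ready to be packed into blocks of the group tree.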
#endif /*_FDISTRIBUTED_GROUPTREE_BUILDER_HPP_*/