Commit 794c437d authored by DUFOYER Benjamin's avatar DUFOYER Benjamin

Factorisation of code in seeking P2P & M2L interaction

parent 7fa13450
......@@ -41,7 +41,8 @@ namespace dstr_grp_tree_builder{
/**
* Return the number of MPI Message needed to send this buffer
* Return the number of MPI messages needed to send this buffer according to
* the MAX_SIZE_MPI_MESSAGE variable defined at the top of this document
* @author benjamin.dufoyer@inria.fr
* @param size_buffer size of the buffer
* @return the number of message
......@@ -62,8 +63,11 @@ unsigned get_nb_mpi_msg(send_type obj_to_send, int nb_obj_to_send){
return get_nb_mpi_msg(sizeof(send_type)*nb_obj_to_send);
}
/**
* This function return the number of element to put into a message
* This function returns the number of elements to put into an MPI message
* according to the MAX_SIZE_MPI_MESSAGE variable defined at the top of this
* document
* @author benjamin.dufoyer@inria.fr
* @param obj_to_send a object to send
* @param nb_obj_to_send number of object to send
......@@ -81,11 +85,11 @@ unsigned get_nb_elt_interval(int size_obj, int nb_obj_to_send){
}
/**
* This function split MPI message if the buffer is too high
* This function split MPI message if the buffer is too large
* this function works together with irecv_splited
* She send more 1 message if the buffer have a good size and send more than 1
* message if the size of the buffer is bigger than MAX_SIZE_MPI_MESSAGE define
* at the front of this documents
* It sends a single message if the buffer has an acceptable size, and more
* than one message if the size of the buffer is bigger than
* MAX_SIZE_MPI_MESSAGE defined at the top of this document
* @author benjamin.dufoyer@inria.fr
* @param conf MPI Conf
* @param addr_send Vector address of data to send
......@@ -209,7 +213,9 @@ void irecv_splited(const inria::mpi_config& conf,
}
/**
* fill_new_linear_tree this function fill the new linear tree with the value of the current linear tree who are conserved.
* fill_new_linear_tree: this function fills the new linear tree with the
* values of the current linear tree
* The current linear tree is kept intact.
* @author benjamin.dufoyer@inria.fr
* @param source old linear tree
* @param destination new linear tree
......@@ -244,6 +250,9 @@ void fill_new_linear_tree(
/**
* This function redistribute leaf between proc according to groupSize
* It modifies the linear_tree passed as parameter
* It builds a new vector and then swaps the pointers
*
* [RESTRICTION] : the number of leaves is important; we cannot handle the
* case where a proc has no leaf
* the leaf on proc according to the groupsize
......@@ -254,10 +263,15 @@ void fill_new_linear_tree(
* @param group_size size of the group of leaf
*/
template<class node_t>
void parrallel_build_block(const inria::mpi_config& conf, std::vector<node_t>* linear_tree, const int& group_size){
void parrallel_build_block(const inria::mpi_config& conf,
std::vector<node_t>* linear_tree,
const int& group_size)
{
// define usefull variables
int nb_local_leaf = (int)linear_tree->size();
const int nb_proc = conf.comm.size();
int* array_global_nb_leaf = (int *)malloc(sizeof(int) * nb_proc); //nb leaf
std::vector<int> array_global_nb_leaf(nb_proc,0);
//int* array_global_nb_leaf = (int *)malloc(sizeof(int) * nb_proc); //nb leaf
const int my_rank = conf.comm.rank();
// Check if i have leaf on my proc
FAssert(nb_local_leaf > 0);
......@@ -265,7 +279,7 @@ void fill_new_linear_tree(
conf.comm.allgather(&nb_local_leaf,
1,
MPI_INT,
array_global_nb_leaf,
array_global_nb_leaf.data(),
1,
MPI_INT);
......@@ -418,7 +432,7 @@ void fill_new_linear_tree(
// waiting for the end of MPI request
inria::mpi::request::waitall(interaction_recev.size(),tab_mpi_status);
free(array_global_nb_leaf);
//free(array_global_nb_leaf);
// swaping linear_tree pointer
std::swap(*linear_tree,*new_linear_tree);
......@@ -442,34 +456,93 @@ void fill_new_linear_tree(
void share_particle_division(
const inria::mpi_config& conf,
std::pair<type1_t,type2_t> my_pair,
std::vector<std::pair<type1_t,type2_t>>& particle_repartition
std::vector<std::pair<type1_t,type2_t>>& particle_index_distribution
){
conf.comm.allgather(
&my_pair,
sizeof(my_pair),
MPI_CHAR,
particle_repartition.data(),
particle_index_distribution.data(),
sizeof(my_pair),
MPI_CHAR);
}
/**
* This function is an overload of the previous function; it simply calls it.
* It is a convenience wrapper so the caller can pass a particle container
* instead of a pair of indices.
* @author benjamin.dufoyer@inria.fr
* @param conf conf_MPI
* @param particle particle container
* @param particle_index_distribution vector to stock the particle distribution
*/
template<class particle_t,
class type1_t,
class type2_t>
void share_particle_division(
const inria::mpi_config& conf,
std::vector<particle_t>& particle,
std::vector<std::pair<type1_t,type2_t>>& particle_repartition)
std::vector<std::pair<type1_t,type2_t>>& particle_index_distribution)
{
FAssert(particle_repartition.size() == (unsigned)conf.comm.size());
FAssert(particle_index_distribution.size() == (unsigned)conf.comm.size());
FAssert(particle.size() > 0);
std::pair<type1_t,type2_t> my_idx;
my_idx.first = particle.front().morton_index;
my_idx.second = particle.back().morton_index;
// Distribute the local min max morton_index to every process
share_particle_division(conf,my_idx,particle_repartition);
share_particle_division(conf,my_idx,particle_index_distribution);
}
/**
* This function sorts a vector of MortonIndex and removes duplicate entries
* @author benjamin.dufoyer@inria.fr
* @param data_to_modify The MortonIndex's vector
* @param nb_data number of data in the vector
* @return Vector of MortonIndex
*/
std::vector<MortonIndex> sort_and_delete_duplicate_data(
std::vector<MortonIndex> data_to_modify,
unsigned nb_data
){
if(nb_data != 0) {
// Sort every morton index
//std::sort(data_to_modify.begin(),data_to_modify.begin()+nb_data, [](MortonIndex a, MortonIndex b){
// return a < b;
//});
FQuickSort<MortonIndex>::QsSequential(data_to_modify.data(),nb_data);
// Compute the number of different morton index
// to allocate vector
unsigned nb_leaf = 1;
MortonIndex last_m_idx = data_to_modify[0];
for(unsigned i = 1 ; i < nb_data ; ++i){
if(last_m_idx != data_to_modify[i]){
last_m_idx = data_to_modify[i];
nb_leaf++;
}
}
// Alloc the returned vector
std::vector<MortonIndex> leaf_needed(nb_leaf,0);
// Fill the returned vector
MortonIndex current_idx = 1;
last_m_idx = data_to_modify[0];
leaf_needed[0] = data_to_modify[0];
for(unsigned i = 1 ; i < nb_data ; ++i){
if(last_m_idx != data_to_modify[i]){
last_m_idx = data_to_modify[i];
leaf_needed[current_idx] = data_to_modify[i];
++current_idx;
}
if((unsigned)current_idx == nb_leaf)
break;
}
return leaf_needed;
} else {
std::vector<MortonIndex> leaf_needed(0,0);
return leaf_needed;
}
}
......@@ -495,10 +568,10 @@ std::vector<MortonIndex> get_leaf_P2P_interaction(
const MortonIndex& local_max_m_idx
){
// 26 is for every interaction
std::vector<std::size_t> externalInteractionsLeafLevel(tree.getTotalNbLeaf()*26,0);
std::vector<MortonIndex> external_interaction(tree.getTotalNbLeaf()*26,0);
// Reset interactions
// idx to know where we are in the vector
std::size_t idx_vector= 0;
unsigned idx_vector= 0;
// First leaf level
{
// We iterate on every particle group
......@@ -528,7 +601,7 @@ std::vector<MortonIndex> get_leaf_P2P_interaction(
if(interactionsIndexes[idxInter] < local_min_m_idx || interactionsIndexes[idxInter] > local_max_m_idx ){
// Check if the leaf exist
if(interactionsIndexes[idxInter] >= global_min_m_idx && interactionsIndexes[idxInter] <= global_max_m_idx ){
externalInteractionsLeafLevel[idx_vector] = interactionsIndexes[idxInter];
external_interaction[idx_vector] = interactionsIndexes[idxInter];
++idx_vector;
}
}
......@@ -537,38 +610,7 @@ std::vector<MortonIndex> get_leaf_P2P_interaction(
}
}
}
if(idx_vector != 0) {
// Sort every morton index
FQuickSort<std::size_t>::QsSequential(externalInteractionsLeafLevel.data(),idx_vector);
// Compute the number of different morton index
std::size_t nb_leaf = 1;
std::size_t last_leaf = externalInteractionsLeafLevel[0];
for(unsigned i = 1 ; i < idx_vector ; ++i){
if(last_leaf != externalInteractionsLeafLevel[i]){
last_leaf = externalInteractionsLeafLevel[i];
nb_leaf++;
}
}
// Alloc the returned vector
std::vector<MortonIndex> leaf_needed(nb_leaf,0);
// Fill the returned vector
MortonIndex current_idx = 1;
last_leaf = externalInteractionsLeafLevel[0];
leaf_needed[0] = externalInteractionsLeafLevel[0];
for(unsigned i = 1 ; i < idx_vector ; ++i){
if(last_leaf != externalInteractionsLeafLevel[i]){
last_leaf = externalInteractionsLeafLevel[i];
leaf_needed[current_idx] = externalInteractionsLeafLevel[i];
++current_idx;
}
if(current_idx == (int)nb_leaf)
break;
}
return leaf_needed;
} else {
std::vector<MortonIndex> leaf_needed(0,0);
return leaf_needed;
}
return (sort_and_delete_duplicate_data(external_interaction,idx_vector));
}
......@@ -600,9 +642,9 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
int dim = 3)
{
// idx to fill the vector
std::size_t idx_vector = 0;
unsigned idx_vector = 0;
// All External leaf
std::vector<MortonIndex> external_leaf(tree.getNbCellGroupAtLevel(level)*tree.getNbElementsPerBlock()*216,0);
std::vector<MortonIndex> external_interaction(tree.getNbCellGroupAtLevel(level)*tree.getNbElementsPerBlock()*216,0);
// iterate on the group
for(int idxGroup = 0 ; idxGroup < tree.getNbCellGroupAtLevel(level) ; ++idxGroup){
auto* containers = tree.getCellGroup(level,idxGroup);
......@@ -629,10 +671,10 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
if(tmp < local_min_m_idx ||
tmp > local_max_m_idx){
//Stock the leaf
if(idx_vector > external_leaf.size()){
if(idx_vector > external_interaction.size()){
std::cout << "ERROR " << std::endl;
}
external_leaf[idx_vector] = tmp;
external_interaction[idx_vector] = tmp;
++idx_vector;
}
}
......@@ -640,41 +682,7 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
} // end for leaf
} // end for group
// if we have leaf in the vector
if(idx_vector != 0 ){
//sort the result
FQuickSort<MortonIndex>::QsSequential(external_leaf.data(),idx_vector);
// compute the number of leaf
std::size_t nb_leaf = 1;
MortonIndex last_leaf = external_leaf[0];
for(unsigned i = 1 ; i < idx_vector ; ++i){
if(last_leaf != external_leaf[i]){
last_leaf = external_leaf[i];
nb_leaf++;
}
}
// Alloc the returned vector
std::vector<MortonIndex> leaf_needed(nb_leaf,0);
// Fill the returned vector
MortonIndex current_idx = 1;
last_leaf = external_leaf[0];
leaf_needed[0] = external_leaf[0];
for(unsigned i = 1 ; i < idx_vector ; ++i){
if(last_leaf != external_leaf[i]){
last_leaf = external_leaf[i];
leaf_needed[current_idx] = external_leaf[i];
++current_idx;
}
if(current_idx == (long)nb_leaf)
break;
}
return leaf_needed;
} else {
std::vector<MortonIndex> leaf_needed(0,0);
return leaf_needed;
}
return (sort_and_delete_duplicate_data(external_interaction,idx_vector));
}
......@@ -1120,7 +1128,6 @@ struct block_t{
};
/**
* This function echange symbolic information of block of a GroupTree
*
......@@ -1418,7 +1425,6 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
conf);
}
/**
* This function call a function of groupTree to create the block recev
* to create a LET group tree
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment