Commit 7fa13450 authored by DUFOYER Benjamin

Modification of the interaction Matrix

parent 1881c45a
@@ -764,6 +764,8 @@ std::vector<MortonIndex> concat_M2L_P2P(
 }
 /**
  * This function computes the number of leaves needed on every proc;
  * it checks whether each leaf exists: if a leaf doesn't exist, this function
@@ -777,7 +779,7 @@ std::vector<MortonIndex> concat_M2L_P2P(
  * @return vector<std::size_t> with a size nb_proc*nb_proc
  * it's an interaction matrix
  */
-std::vector<std::size_t> get_matrix_interaction(
+std::vector<std::vector<std::size_t>> get_matrix_interaction(
     std::vector<MortonIndex>& needed_leaf,
     std::vector<std::pair<MortonIndex,MortonIndex>>& particle_distribution,
     const inria::mpi_config& conf)
@@ -785,8 +787,8 @@ std::vector<std::size_t> get_matrix_interaction(
     // Getting MPI Info
     const int nb_proc = conf.comm.size();
     // Alloc interaction matrix
-    std::vector<std::size_t> my_matrix_interaction(nb_proc,0);
-    std::vector<std::size_t> global_matrix_interaction(nb_proc*nb_proc,0);
+    std::vector<std::vector<std::size_t>> matrix_interaction(2,std::vector<std::size_t>(nb_proc,0));
+    std::vector<std::size_t> global_matrix_interaction(nb_proc,0);
     // Initialise idx on particle_distribution
     std::size_t idx_part = 0;
     // Iterate on every leaf to know where it is
@@ -795,7 +797,7 @@ std::vector<std::size_t> get_matrix_interaction(
         // if it is on the current proc
         if(current_leaf >= particle_distribution[idx_part].first
            && current_leaf <= particle_distribution[idx_part].second){
-            my_matrix_interaction[idx_part] += 1;
+            matrix_interaction[0][idx_part] += 1;
         } else {
             // While the current leaf is not in the right interval
             while(particle_distribution[idx_part].second < current_leaf){
@@ -807,24 +809,25 @@ std::vector<std::size_t> get_matrix_interaction(
             } else {
                 // In the normal case, we just increment the
                 // number of leaves sent to proc idx_part
-                my_matrix_interaction[idx_part] += 1;
+                matrix_interaction[0][idx_part] += 1;
             }
         }
     }
     // now we have the number of leaves to send to every proc
-    // we perform an AllGather to share this information with every proc
-    conf.comm.allgather(my_matrix_interaction.data(),
-                        nb_proc,
-                        my_MPI_SIZE_T,
-                        global_matrix_interaction.data(),
-                        nb_proc,
-                        my_MPI_SIZE_T);
+    // we perform an AlltoAll to share this information with every proc
+    conf.comm.alltoall(matrix_interaction[0].data(),
+                       1,
+                       my_MPI_SIZE_T,
+                       matrix_interaction[1].data(),
+                       1,
+                       my_MPI_SIZE_T);
     // removing bad leaf
     needed_leaf.erase(std::remove(needed_leaf.begin(),needed_leaf.end(),0),needed_leaf.end());
-    return {begin(global_matrix_interaction),end(global_matrix_interaction)};
+    return {begin(matrix_interaction),end(matrix_interaction)};
 }
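Note: the switch from allgather to alltoall is the heart of this commit. Each rank no longer stores the full nb_proc*nb_proc matrix; it keeps only the row it sends (index 0, leaves it requests from each peer) and the row it receives (index 1, leaves each peer requests from it). A minimal standalone sketch of that exchange, using raw MPI rather than the inria::mpi wrapper (the buffer contents here are fabricated for illustration):

```cpp
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int nb_proc, my_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    // Row 0: how many leaves this rank will request from each peer.
    // Row 1: how many leaves each peer will request from this rank.
    std::vector<std::vector<unsigned long long>> matrix(
        2, std::vector<unsigned long long>(nb_proc, 0));
    matrix[0][(my_rank + 1) % nb_proc] = 42; // dummy demand on the next rank

    // One element per peer in each direction, hence count 1 (not nb_proc).
    MPI_Alltoall(matrix[0].data(), 1, MPI_UNSIGNED_LONG_LONG,
                 matrix[1].data(), 1, MPI_UNSIGNED_LONG_LONG,
                 MPI_COMM_WORLD);

    int prev = (my_rank + nb_proc - 1) % nb_proc;
    std::printf("rank %d must send %llu leaves to rank %d\n",
                my_rank, matrix[1][prev], prev);
    MPI_Finalize();
    return 0;
}
```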
 /**
  * This function computes the number of blocks needed to send all the leaves
  * stored in leaf_needed.
@@ -840,20 +843,22 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
     std::size_t nb_leaf)
 {
     std::vector<MortonIndex> block_to_send(tree.getNbParticleGroup(),0);
+    if(nb_leaf == 0)
+        return {block_to_send.begin(),block_to_send.begin()};
     // declaration of idx variables
     unsigned idx_vector = 0;
     unsigned idx_leaf = 0;
     // iterate on every group
     for(int idx_group = 0 ; idx_group < tree.getNbParticleGroup() ; ++idx_group){
-        if(idx_leaf == nb_leaf)
+        if(idx_leaf >= nb_leaf)
             break;
         // get the current block
         auto* container = tree.getParticleGroup(idx_group);
         // get first leaf in this interval
-        while( container->getStartingIndex() > leaf_needed[idx_leaf] && idx_leaf < nb_leaf ){
+        while( idx_leaf < nb_leaf && container->getStartingIndex() > leaf_needed[idx_leaf]){
             ++idx_leaf;
         }
-        if(idx_leaf == nb_leaf)
+        if(idx_leaf >= nb_leaf)
             break;
         while( container->getEndingIndex() < leaf_needed[idx_leaf] &&
                idx_leaf < nb_leaf){
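Note: the reordered loop conditions above are a real fix, not style. `&&` short-circuits left to right, so the bound must be tested before the element is read. A tiny self-contained sketch of the failure mode, with hypothetical data:

```cpp
#include <vector>
#include <cassert>

int main() {
    std::vector<long long> leaf_needed = {3, 7, 9};
    std::size_t nb_leaf = leaf_needed.size();
    std::size_t idx_leaf = 0;
    long long starting_index = 100; // larger than every needed leaf

    // Old order: leaf_needed[idx_leaf] is evaluated first, so once
    // idx_leaf == nb_leaf the loop reads one past the end of the
    // vector (undefined behaviour):
    //   while(starting_index > leaf_needed[idx_leaf] && idx_leaf < nb_leaf)

    // New order: the bound check runs first and stops the loop safely.
    while(idx_leaf < nb_leaf && starting_index > leaf_needed[idx_leaf]) {
        ++idx_leaf;
    }
    assert(idx_leaf == nb_leaf);
    return 0;
}
```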
@@ -921,12 +926,10 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
             break;
         }
         // while the morton index of the current node is not high enough
-        while(node_needed[idx_node] < containers->getStartingIndex() &&
-                                idx_node < nb_node){
+        while(idx_node < nb_node && node_needed[idx_node] < containers->getStartingIndex()){
             ++idx_node;
         }
-        while(node_needed[idx_node] < containers->getEndingIndex() &&
-                                idx_node < nb_node){
+        while(idx_node < nb_node && node_needed[idx_node] < containers->getEndingIndex()){
             if(containers->isInside(node_needed[idx_node])){
                 block_to_send[idx_vect] = idx_group;
                 ++idx_vect;
@@ -959,7 +962,8 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
 template<class GroupOctreeClass>
 void send_get_number_of_block_node_level(
     std::vector<MortonIndex>& vect_recv,
-    std::vector<std::size_t>& global_matrix_interaction,
+    std::vector<std::vector<std::size_t>> global_matrix_interaction,
+    //std::vector<std::size_t>& global_matrix_interaction,
     std::size_t& nb_msg_recv,
     GroupOctreeClass& tree,
     std::vector<std::pair<int,int>>& nb_block_to_receiv,
@@ -968,10 +972,6 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
     const inria::mpi_config& conf
 )
 {
-    // Get MPI Info
-    const unsigned nb_proc = conf.comm.size();
-    const unsigned my_rank = conf.comm.rank();
     int idx_status = 0;
     int idx_proc = 0;
     inria::mpi::request tab_mpi_status[nb_msg_recv];
@@ -979,12 +979,10 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
     std::vector<bool> block_already_send(tree.getNbCellGroupAtLevel(level),false);
     // Post the reception of the number of blocks
-    for(unsigned i = (my_rank*nb_proc);
-                i < (my_rank*nb_proc)+nb_proc;
-                ++i)
+    for(unsigned i = 0; i < global_matrix_interaction[0].size() ; ++i)
     {
         // If we have interaction with this proc
-        if(global_matrix_interaction[i] != 0){
+        if(global_matrix_interaction[0][i] != 0){
             nb_block_to_receiv[idx_status].first = idx_proc;
             tab_mpi_status[idx_status] = conf.comm.irecv(
                 &nb_block_to_receiv[idx_status].second,
@@ -1000,20 +998,20 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
     std::size_t idx_vect = 0;
     idx_status = 0;
     // Posting every send message
-    for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
+    for(unsigned i = 0; i < global_matrix_interaction[1].size() ; ++i){
         // If we have interaction with this proc
-        if(global_matrix_interaction[i] != 0){
+        if(global_matrix_interaction[1][i] != 0){
             // Compute the number of leaves
             if(leaf_level){
                 leaf_to_send[idx_status].second = get_nb_block_from_leaf(
                     tree,
                     &vect_recv.data()[idx_vect],
-                    global_matrix_interaction[i]);
+                    global_matrix_interaction[1][i]);
             } else {
                 leaf_to_send[idx_status].second = get_nb_block_from_node(
                     tree,
                     &vect_recv.data()[idx_vect],
-                    global_matrix_interaction[i],
+                    global_matrix_interaction[1][i],
                     level,
                     &block_already_send);
             }
@@ -1026,7 +1024,7 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
                 MPI_INT,
                 idx_proc,1
             );
-            idx_vect += global_matrix_interaction[i];
+            idx_vect += global_matrix_interaction[1][i];
             idx_status += 1;
         }
         idx_proc += 1;
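Note: with the two-row matrix, the old `my_rank*nb_proc` striding over a flat nb_proc*nb_proc buffer disappears entirely: row 0 tells this rank which peers owe it an answer, row 1 which peers it must answer. A hedged sketch of that pairing with raw MPI non-blocking calls (the helper name and dummy payloads are invented for illustration):

```cpp
#include <mpi.h>
#include <vector>

void exchange_counts(const std::vector<std::vector<unsigned long long>>& matrix,
                     MPI_Comm comm) {
    int nb_proc;
    MPI_Comm_size(comm, &nb_proc);
    std::vector<int> nb_block_recv(nb_proc, 0);
    std::vector<int> nb_block_send(nb_proc, 7); // dummy block counts
    std::vector<MPI_Request> requests;

    for(int proc = 0; proc < nb_proc; ++proc) {
        if(matrix[0][proc] != 0) {     // we requested leaves from proc:
            requests.emplace_back();   // expect its block count back
            MPI_Irecv(&nb_block_recv[proc], 1, MPI_INT, proc, 1, comm,
                      &requests.back());
        }
        if(matrix[1][proc] != 0) {     // proc requested leaves from us:
            requests.emplace_back();   // answer with our block count
            MPI_Isend(&nb_block_send[proc], 1, MPI_INT, proc, 1, comm,
                      &requests.back());
        }
    }
    // Every Irecv here matches an Isend on the peer, because after the
    // alltoall matrix[0][p] on this rank equals matrix[1][us] on rank p.
    MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
}
```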
@@ -1050,15 +1048,12 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
  */
 std::vector<MortonIndex> send_get_leaf_morton(
     std::vector<MortonIndex>& needed_leaf,
-    std::vector<std::size_t>& global_matrix_interaction,
+    std::vector<std::vector<std::size_t>>& global_matrix_interaction,
+    //std::vector<std::size_t>& global_matrix_interaction,
     std::size_t& nb_msg_recv,
     std::size_t& nb_leaf_recv,
     const inria::mpi_config& conf)
 {
-    // Get MPI Info
-    const unsigned nb_proc = conf.comm.size();
-    const unsigned my_rank = conf.comm.rank();
     // allocate tab of mpi status for synchronisation
     std::vector<inria::mpi::request> tab_mpi_status(nb_msg_recv);
@@ -1070,15 +1065,15 @@ std::vector<MortonIndex> send_get_leaf_morton(
     unsigned idx_vect = 0;
     int idx_proc = 0;
     // Posting every recv message
-    for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
-        if(global_matrix_interaction[i] != 0){
+    for(unsigned i = 0; i < global_matrix_interaction[1].size() ; ++i ){
+        if(global_matrix_interaction[1][i] != 0){
             irecv_splited(
                 conf,
                 &tab_mpi_status,
                 &idx_status,
                 &vect_recv,
                 &idx_vect,
-                global_matrix_interaction[i],
+                global_matrix_interaction[1][i],
                 idx_proc,1
             );
         }
@@ -1088,16 +1083,13 @@ std::vector<MortonIndex> send_get_leaf_morton(
     // Posting every send message
     idx_proc = 0;
     idx_vect = 0;
-    for(unsigned i = my_rank*nb_proc;
-                i < (my_rank*nb_proc)+nb_proc;
-                ++i)
-    {
-        if(global_matrix_interaction[i] != 0){
+    for(unsigned i = 0; i < global_matrix_interaction[0].size() ; ++i){
+        if(global_matrix_interaction[0][i] != 0){
             isend_splited(
                 conf,
                 &needed_leaf,
                 &idx_vect,
-                global_matrix_interaction[i],
+                global_matrix_interaction[0][i],
                 idx_proc,1
             );
         }
@@ -1346,36 +1338,39 @@ void compute_block_node_level(
+/**
+ * This function returns a vector with the symbolic information of the blocks
+ * needed to build a LET group tree
+ *
+ * @author benjamin.dufoyer@inria.fr
+ * @param needed_leaf list of the leaves needed
+ * @param matrix_interaction matrix of interaction
+ * @param tree local group tree
+ * @param conf MPI conf
+ */
 template<class GroupOctreeClass>
 std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
     std::vector<MortonIndex>& needed_leaf,
-    std::vector<size_t>& global_matrix_interaction,
+    std::vector<std::vector<size_t>>& matrix_interaction,
     GroupOctreeClass& tree,
     int level,
     const inria::mpi_config& conf
-) {
+){
-    // Get MPI Info
-    const unsigned nb_proc = conf.comm.size();
-    const unsigned my_rank = conf.comm.rank();
-    // Compute the number of sent messages and received messages
     std::size_t nb_msg_recv = 0;
     std::size_t nb_leaf_recv = 0;
     std::size_t nb_msg_send = 0;
-    for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
-        if(global_matrix_interaction[i] != 0){
-            nb_msg_recv += 1;
-            nb_leaf_recv += global_matrix_interaction[i];
-        }
-    }
-    for(unsigned i = (my_rank*nb_proc); i < ((my_rank*nb_proc)+nb_proc); ++i){
-        if(global_matrix_interaction[i] != 0){
-            nb_msg_send += 1;
-        }
-    }
+    // Getting the number of sent messages
+    for(unsigned i = 0 ; i < matrix_interaction[0].size() ; ++i){
+        if(matrix_interaction[0][i] > 0 )
+            ++nb_msg_send;
+    }
+    // Getting the number of recv messages and the number of leaves
+    for(unsigned i = 0 ; i < matrix_interaction[1].size() ; ++i){
+        if(matrix_interaction[1][i] > 0){
+            ++nb_msg_recv;
+            nb_leaf_recv += matrix_interaction[1][i];
+        }
+    }
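Note: the counting loops above size every buffer used later in the exchange. A small self-checking sketch of the same logic, under the row convention introduced by this commit (the matrix content is fabricated):

```cpp
#include <vector>
#include <cstddef>
#include <cassert>

int main() {
    // matrix[0]: leaves we request from each peer (drives our requests)
    // matrix[1]: leaves each peer requests from us (drives what we receive)
    std::vector<std::vector<std::size_t>> matrix = {
        {0, 5, 0, 2},  // we query ranks 1 and 3 -> 2 outgoing requests
        {3, 0, 0, 4}   // ranks 0 and 3 query us -> 2 incoming messages,
    };                 //                           7 Morton indices total
    std::size_t nb_msg_send = 0, nb_msg_recv = 0, nb_leaf_recv = 0;
    for(std::size_t i = 0; i < matrix[0].size(); ++i)
        if(matrix[0][i] > 0) ++nb_msg_send;
    for(std::size_t i = 0; i < matrix[1].size(); ++i)
        if(matrix[1][i] > 0) { ++nb_msg_recv; nb_leaf_recv += matrix[1][i]; }
    assert(nb_msg_send == 2 && nb_msg_recv == 2 && nb_leaf_recv == 7);
    return 0;
}
```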
@@ -1386,21 +1381,24 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
     std::vector<MortonIndex> vect_recv =
         send_get_leaf_morton(
             needed_leaf,
-            global_matrix_interaction,
+            matrix_interaction,
             nb_msg_recv,
             nb_leaf_recv,
             conf);
+    ////////////////////////////////////////////////////////////
+    // SECOND STEP
+    // Compute the blocks to send to the other procs
+    // And send the number of blocks sent
+    ////////////////////////////////////////////////////////////
     // Init variables to store the results
     std::vector<std::pair<int,int>> nb_block_to_receiv(nb_msg_send);
     std::vector<std::pair<int,std::vector<MortonIndex>>>
         list_of_block_to_send(nb_msg_recv);
-    // Second step
-    // Compute the blocks to send to the other procs
-    // And send the number of blocks sent
     send_get_number_of_block_node_level(
         vect_recv,
-        global_matrix_interaction,
+        matrix_interaction,
         nb_msg_send,
         tree,
         nb_block_to_receiv,
@@ -1408,7 +1406,10 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
         level,
         conf);
+    ////////////////////////////////////////////////////////////
+    /// THIRD STEP
+    /// Getting the list of leaves needed by every proc
+    ////////////////////////////////////////////////////////////
     return exchange_block(
         nb_block_to_receiv,
         list_of_block_to_send,
@@ -1463,10 +1464,6 @@ void add_let_leaf_block_to_tree(
         tree.add_LET_block(leaf_block_to_add,level,local_min_m_idx);
     }
 }
...
@@ -328,7 +328,6 @@ public:
             tree,
             dim);
         std::vector<MortonIndex> needed_leaf;
         if(leaf_level){
             // this function returns the concatenation of the leaves for the P2P and
@@ -342,7 +341,7 @@ public:
             );
         }
-        std::vector<size_t> global_matrix_interaction = dstr_grp_tree_builder::get_matrix_interaction(
+        std::vector<std::vector<size_t>> global_matrix_interaction = dstr_grp_tree_builder::get_matrix_interaction(
             needed_leaf,
             index_particle_distribution,
             this->mpi_conf);
@@ -358,6 +357,7 @@ public:
             tree,
             level,
             this->mpi_conf);
+        // Add the blocks received to the local group tree
         dstr_grp_tree_builder::add_let_leaf_block_to_tree(
             tree,
...