Commit 7fa13450 authored by DUFOYER Benjamin

Modification of the interaction Matrix

parent 1881c45a
@@ -764,6 +764,8 @@ std::vector<MortonIndex> concat_M2L_P2P(
}
/**
* This function computes the number of leaves needed on every proc;
* it checks whether each leaf exists, and if a leaf does not exist this function
@@ -777,7 +779,7 @@ std::vector<MortonIndex> concat_M2L_P2P(
* @return vector<vector<std::size_t>> with 2 rows of size nb_proc;
* it is the interaction matrix
*/
std::vector<std::size_t> get_matrix_interaction(
std::vector<std::vector<std::size_t>> get_matrix_interaction(
std::vector<MortonIndex>& needed_leaf,
std::vector<std::pair<MortonIndex,MortonIndex>>& particle_distribution,
const inria::mpi_config& conf)
@@ -785,8 +787,8 @@ std::vector<std::size_t> get_matrix_interaction(
// Getting MPI Info
const int nb_proc = conf.comm.size();
// Alloc interaction matrix
std::vector<std::size_t> my_matrix_interaction(nb_proc,0);
std::vector<std::size_t> global_matrix_interaction(nb_proc*nb_proc,0);
std::vector<std::vector<std::size_t>> matrix_interaction(2,std::vector<std::size_t>(nb_proc,0));
std::vector<std::size_t> global_matrix_interaction(nb_proc,0);
// Initialise the index into particle_distribution
std::size_t idx_part = 0;
// Iterate over every leaf to find out which proc owns it
@@ -795,7 +797,7 @@ std::vector<std::size_t> get_matrix_interaction(
// if it is on the current proc
if(current_leaf >= particle_distribution[idx_part].first
&& current_leaf <= particle_distribution[idx_part].second){
my_matrix_interaction[idx_part] += 1;
matrix_interaction[0][idx_part] += 1;
} else {
// While the current leaf is not in the right interval
while(particle_distribution[idx_part].second < current_leaf){
@@ -807,24 +809,25 @@ std::vector<std::size_t> get_matrix_interaction(
} else {
// In the normal case, we just increment the
// number of leaves sent to the proc idx_part
my_matrix_interaction[idx_part] += 1;
matrix_interaction[0][idx_part] += 1;
}
}
}
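// --------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the classification loop above in
// a standalone, simplified form. It assumes the leaves and the per-rank
// [first,last] Morton intervals are both sorted and it skips the handling of
// missing leaves; every name below is hypothetical.
#include <cstddef>
#include <utility>
#include <vector>

std::vector<std::size_t> count_leaves_per_rank(
    const std::vector<long long>& sorted_leaves,
    const std::vector<std::pair<long long,long long>>& rank_intervals)
{
    std::vector<std::size_t> counts(rank_intervals.size(), 0);
    if(rank_intervals.empty())
        return counts;
    std::size_t idx_rank = 0;
    for(long long leaf : sorted_leaves){
        // advance to the first rank whose interval can still contain this leaf
        while(idx_rank + 1 < rank_intervals.size()
              && rank_intervals[idx_rank].second < leaf){
            ++idx_rank;
        }
        if(leaf >= rank_intervals[idx_rank].first
           && leaf <= rank_intervals[idx_rank].second){
            ++counts[idx_rank];   // one more leaf requested from this rank
        }
    }
    return counts;
}
// --------------------------------------------------------------------------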
// now we have the number of leaves to send to every proc
// we perform an AllGather to share this information with every proc
conf.comm.allgather(my_matrix_interaction.data(),
nb_proc,
// we perform an AlltoAll to share this information with every proc
conf.comm.alltoall(matrix_interaction[0].data(),
1,
my_MPI_SIZE_T,
global_matrix_interaction.data(),
nb_proc,
matrix_interaction[1].data(),
1,
my_MPI_SIZE_T);
// removing bad leaves
needed_leaf.erase(std::remove(needed_leaf.begin(),needed_leaf.end(),0),needed_leaf.end());
return {begin(global_matrix_interaction),end(global_matrix_interaction)};
return {begin(matrix_interaction),end(matrix_interaction)};
}
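// --------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the exchange introduced above,
// written with plain MPI instead of the inria::mpi wrapper. Row 0 holds how
// many leaves this rank requests from each peer; one MPI_Alltoall with a
// single element per peer fills row 1 with how many leaves each peer
// requests from this rank. MPI_UINT64_T stands in for the project's
// my_MPI_SIZE_T; the function name and parameters are hypothetical.
#include <cstddef>
#include <vector>
#include <mpi.h>

std::vector<std::vector<std::size_t>> exchange_interaction_counts(
    const std::vector<std::size_t>& needed_from_peer,   // size == number of ranks
    MPI_Comm comm)
{
    int nb_proc = 0;
    MPI_Comm_size(comm, &nb_proc);
    std::vector<std::vector<std::size_t>> matrix(2, std::vector<std::size_t>(nb_proc, 0));
    matrix[0] = needed_from_peer;                        // what I ask of every peer
    MPI_Alltoall(matrix[0].data(), 1, MPI_UINT64_T,
                 matrix[1].data(), 1, MPI_UINT64_T,      // what every peer asks of me
                 comm);
    return matrix;
}
// --------------------------------------------------------------------------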
/**
* This function computes the number of blocks needed to send all the leaves
* stored in leaf_needed.
@@ -840,20 +843,22 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
std::size_t nb_leaf)
{
std::vector<MortonIndex> block_to_send(tree.getNbParticleGroup(),0);
if(nb_leaf == 0)
return {block_to_send.begin(),block_to_send.begin()};
// declaration of idx variables
unsigned idx_vector = 0;
unsigned idx_leaf = 0;
// iterate on every group
for(int idx_group = 0 ; idx_group < tree.getNbParticleGroup() ; ++idx_group){
if(idx_leaf == nb_leaf)
if(idx_leaf >= nb_leaf)
break;
// get the current block
auto* container = tree.getParticleGroup(idx_group);
// get first leaf in this interval
while( container->getStartingIndex() > leaf_needed[idx_leaf] && idx_leaf < nb_leaf ){
while( idx_leaf < nb_leaf && container->getStartingIndex() > leaf_needed[idx_leaf]){
++idx_leaf;
}
if(idx_leaf == nb_leaf)
if(idx_leaf >= nb_leaf)
break;
while( container->getEndingIndex() < leaf_needed[idx_leaf] &&
idx_leaf < nb_leaf){
@@ -921,12 +926,10 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
break;
}
// while the morton index of the current node is below the start of this block
while(node_needed[idx_node] < containers->getStartingIndex() &&
idx_node < nb_node){
while(idx_node < nb_node && node_needed[idx_node] < containers->getStartingIndex()){
++idx_node;
}
while(node_needed[idx_node] < containers->getEndingIndex() &&
idx_node < nb_node){
while(idx_node < nb_node && node_needed[idx_node] < containers->getEndingIndex()){
if(containers->isInside(node_needed[idx_node])){
block_to_send[idx_vect] = idx_group;
++idx_vect;
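// --------------------------------------------------------------------------
// Editor's note (not part of this patch): the rewritten while conditions
// above put the bound check first so that && short-circuits before
// leaf_needed[idx_leaf] / node_needed[idx_node] can be read past the end of
// the array. A minimal standalone illustration with hypothetical names:
#include <cstddef>
#include <vector>

std::size_t advance_while_below(const std::vector<long long>& sorted_indices,
                                std::size_t idx,
                                long long bound)
{
    // Safe: idx is validated before the element is accessed. With the two
    // operands swapped, sorted_indices[idx] could be evaluated out of range.
    while(idx < sorted_indices.size() && sorted_indices[idx] < bound){
        ++idx;
    }
    return idx;
}
// --------------------------------------------------------------------------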
@@ -959,7 +962,8 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
template<class GroupOctreeClass>
void send_get_number_of_block_node_level(
std::vector<MortonIndex>& vect_recv,
std::vector<std::size_t>& global_matrix_interaction,
std::vector<std::vector<std::size_t>> global_matrix_interaction,
//std::vector<std::size_t>& global_matrix_interaction,
std::size_t& nb_msg_recv,
GroupOctreeClass& tree,
std::vector<std::pair<int,int>>& nb_block_to_receiv,
@@ -968,10 +972,6 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
const inria::mpi_config& conf
)
{
// Get MPI Info
const unsigned nb_proc = conf.comm.size();
const unsigned my_rank = conf.comm.rank();
int idx_status = 0;
int idx_proc = 0;
inria::mpi::request tab_mpi_status[nb_msg_recv];
@@ -979,12 +979,10 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
std::vector<bool> block_already_send(tree.getNbCellGroupAtLevel(level),false);
// Post the receptions of the number of blocks
for(unsigned i = (my_rank*nb_proc);
i < (my_rank*nb_proc)+nb_proc;
++i)
for(unsigned i = 0; i < global_matrix_interaction[0].size() ; ++i)
{
// If we have interaction with this proc
if(global_matrix_interaction[i] != 0){
if(global_matrix_interaction[0][i] != 0){
nb_block_to_receiv[idx_status].first = idx_proc;
tab_mpi_status[idx_status] = conf.comm.irecv(
&nb_block_to_receiv[idx_status].second,
@@ -1000,20 +998,20 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
std::size_t idx_vect = 0;
idx_status = 0;
// Posting every send message
for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
for(unsigned i = 0; i < global_matrix_interaction[1].size() ; ++i){
// If we have interaction with this proc
if(global_matrix_interaction[i] != 0){
if(global_matrix_interaction[1][i] != 0){
// Compute the number of blocks to send
if(leaf_level){
leaf_to_send[idx_status].second = get_nb_block_from_leaf(
tree,
&vect_recv.data()[idx_vect],
global_matrix_interaction[i]);
global_matrix_interaction[1][i]);
} else {
leaf_to_send[idx_status].second = get_nb_block_from_node(
tree,
&vect_recv.data()[idx_vect],
global_matrix_interaction[i],
global_matrix_interaction[1][i],
level,
&block_already_send);
}
@@ -1026,7 +1024,7 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
MPI_INT,
idx_proc,1
);
idx_vect += global_matrix_interaction[i];
idx_vect += global_matrix_interaction[1][i];
idx_status += 1;
}
idx_proc += 1;
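// --------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the posting pattern used above,
// with plain MPI and hypothetical names. One non-blocking receive is posted
// per nonzero entry of the "incoming" row of the interaction matrix, one
// non-blocking send per nonzero entry of the "outgoing" row, and a single
// wait then covers every pending request.
#include <cstddef>
#include <vector>
#include <mpi.h>

void exchange_block_counts(const std::vector<std::size_t>& incoming_row,
                           const std::vector<std::size_t>& outgoing_row,
                           std::vector<int>& counts_recv,        // one slot per peer
                           const std::vector<int>& counts_send,  // one slot per peer
                           MPI_Comm comm)
{
    std::vector<MPI_Request> requests;
    for(int p = 0; p < static_cast<int>(incoming_row.size()); ++p){
        if(incoming_row[p] != 0){            // peer p owes this rank an answer
            requests.emplace_back();
            MPI_Irecv(&counts_recv[p], 1, MPI_INT, p, 1, comm, &requests.back());
        }
    }
    for(int p = 0; p < static_cast<int>(outgoing_row.size()); ++p){
        if(outgoing_row[p] != 0){            // this rank owes peer p an answer
            requests.emplace_back();
            MPI_Isend(&counts_send[p], 1, MPI_INT, p, 1, comm, &requests.back());
        }
    }
    MPI_Waitall(static_cast<int>(requests.size()), requests.data(), MPI_STATUSES_IGNORE);
}
// --------------------------------------------------------------------------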
@@ -1050,15 +1048,12 @@ std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
*/
std::vector<MortonIndex> send_get_leaf_morton(
std::vector<MortonIndex>& needed_leaf,
std::vector<std::size_t>& global_matrix_interaction,
std::vector<std::vector<std::size_t>>& global_matrix_interaction,
//std::vector<std::size_t>& global_matrix_interaction,
std::size_t& nb_msg_recv,
std::size_t& nb_leaf_recv,
const inria::mpi_config& conf)
{
// Get MPI Info
const unsigned nb_proc = conf.comm.size();
const unsigned my_rank = conf.comm.rank();
// allocate the array of MPI requests used for synchronisation
std::vector<inria::mpi::request> tab_mpi_status(nb_msg_recv);
@@ -1070,15 +1065,15 @@ std::vector<MortonIndex> send_get_leaf_morton(
unsigned idx_vect = 0;
int idx_proc = 0;
// Posting every recv message
for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
if(global_matrix_interaction[i] != 0){
for(unsigned i = 0; i < global_matrix_interaction[1].size() ; ++i ){
if(global_matrix_interaction[1][i] != 0){
irecv_splited(
conf,
&tab_mpi_status,
&idx_status,
&vect_recv,
&idx_vect,
global_matrix_interaction[i],
global_matrix_interaction[1][i],
idx_proc,1
);
}
@@ -1088,16 +1083,13 @@ std::vector<MortonIndex> send_get_leaf_morton(
// Posting every send message
idx_proc = 0;
idx_vect = 0;
for(unsigned i = my_rank*nb_proc;
i < (my_rank*nb_proc)+nb_proc;
++i)
{
if(global_matrix_interaction[i] != 0){
for(unsigned i = 0; i < global_matrix_interaction[0].size() ; ++i){
if(global_matrix_interaction[0][i] != 0){
isend_splited(
conf,
&needed_leaf,
&idx_vect,
global_matrix_interaction[i],
global_matrix_interaction[0][i],
idx_proc,1
);
}
@@ -1346,36 +1338,39 @@ void compute_block_node_level(
/**
* This function returns a vector with the symbolic information of the
* blocks needed to build a LET group tree
*
* @author benjamin.dufoyer@inria.fr
* @param needed_leaf list of the needed leaves
* @param matrix_interaction the interaction matrix
* @param tree local group tree
* @param conf MPI configuration
*/
template<class GroupOctreeClass>
std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
std::vector<MortonIndex>& needed_leaf,
std::vector<size_t>& global_matrix_interaction,
std::vector<std::vector<size_t>>& matrix_interaction,
GroupOctreeClass& tree,
int level,
const inria::mpi_config& conf
) {
// Get MPI Info
const unsigned nb_proc = conf.comm.size();
const unsigned my_rank = conf.comm.rank();
// Compute the number of sent and received messages
){
std::size_t nb_msg_recv = 0;
std::size_t nb_leaf_recv = 0;
std::size_t nb_msg_send = 0;
for(unsigned i = my_rank; i < global_matrix_interaction.size() ; i+= nb_proc ){
if(global_matrix_interaction[i] != 0){
nb_msg_recv += 1;
nb_leaf_recv += global_matrix_interaction[i];
}
// Getting the number of sent messages
for(unsigned i = 0 ; i < matrix_interaction[0].size() ; ++i){
if(matrix_interaction[0][i] > 0 )
++nb_msg_send;
}
for(unsigned i = (my_rank*nb_proc); i < ((my_rank*nb_proc)+nb_proc); ++i){
if(global_matrix_interaction[i] != 0){
nb_msg_send += 1;
// Getting the number of received messages and the number of leaves
for(unsigned i = 0 ; i < matrix_interaction[1].size() ; ++i){
if(matrix_interaction[1][i] > 0){
++nb_msg_recv;
nb_leaf_recv += matrix_interaction[1][i];
}
}
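// --------------------------------------------------------------------------
// Editor's sketch (not part of this patch): the two counting loops above,
// folded into one hypothetical helper. For a row of the interaction matrix,
// the number of nonzero entries is the number of messages and their sum is
// the total number of leaves carried by those messages.
#include <cstddef>
#include <utility>
#include <vector>

std::pair<std::size_t,std::size_t> count_messages_and_leaves(
    const std::vector<std::size_t>& row)
{
    std::size_t nb_msg  = 0;
    std::size_t nb_leaf = 0;
    for(std::size_t entry : row){
        if(entry > 0){
            ++nb_msg;
            nb_leaf += entry;
        }
    }
    return {nb_msg, nb_leaf};
}
// --------------------------------------------------------------------------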
@@ -1386,21 +1381,24 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
std::vector<MortonIndex> vect_recv =
send_get_leaf_morton(
needed_leaf,
global_matrix_interaction,
matrix_interaction,
nb_msg_recv,
nb_leaf_recv,
conf);
////////////////////////////////////////////////////////////
// SECOND STEP
// Compute the blocks to send to the other procs
// and send the number of blocks sent
////////////////////////////////////////////////////////////
// Init the variables used to store the results
std::vector<std::pair<int,int>> nb_block_to_receiv(nb_msg_send);
std::vector<std::pair<int,std::vector<MortonIndex>>>
list_of_block_to_send(nb_msg_recv);
// Second step
// Compute the blocks to send to the other procs
// And send the number of blocks sent
send_get_number_of_block_node_level(
vect_recv,
global_matrix_interaction,
matrix_interaction,
nb_msg_send,
tree,
nb_block_to_receiv,
@@ -1408,7 +1406,10 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
level,
conf);
////////////////////////////////////////////////////////////
/// THIRD STEP
/// Getting the list of leaves needed by every proc
////////////////////////////////////////////////////////////
return exchange_block(
nb_block_to_receiv,
list_of_block_to_send,
@@ -1463,10 +1464,6 @@ void add_let_leaf_block_to_tree(
tree.add_LET_block(leaf_block_to_add,level,local_min_m_idx);
}
}
@@ -328,7 +328,6 @@ public:
tree,
dim);
std::vector<MortonIndex> needed_leaf;
if(leaf_level){
// this function returns the concatenation of the leaves for the P2P and
@@ -342,7 +341,7 @@
);
}
std::vector<size_t> global_matrix_interaction = dstr_grp_tree_builder::get_matrix_interaction(
std::vector<std::vector<size_t>> global_matrix_interaction = dstr_grp_tree_builder::get_matrix_interaction(
needed_leaf,
index_particle_distribution,
this->mpi_conf);
@@ -358,6 +357,7 @@ public:
tree,
level,
this->mpi_conf);
// Add the received blocks to the local group tree
dstr_grp_tree_builder::add_let_leaf_block_to_tree(
tree,