Commit 1881c45a authored by DUFOYER Benjamin

fix bug when getting the M2L interaction + optimization of get_nb_block

parent 2c8bc5b6
@@ -34,7 +34,7 @@
#error "what is happening here?"
#endif
#define MAX_SIZE_MPI_MESSAGE 2000000
#define MAX_SIZE_MPI_MESSAGE 4000000
namespace dstr_grp_tree_builder{
@@ -475,6 +475,7 @@ void share_particle_division(
/**
* IDEA: the ending could be factored with the function for the M2L IDEA
* This function computes the morton index of every leaf needed for the P2P
* First we compute every morton index needed for every leaf
* We sort the result
@@ -571,10 +572,9 @@ std::vector<MortonIndex> get_leaf_P2P_interaction(
}
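The first step described above, enumerating the Morton indices of a leaf's up-to-26 neighbors, can be sketched in a self-contained way; this is illustrative code with a hypothetical bit layout, not ScalFMM's FTreeCoordinate:
#include <vector>
using Morton = long long;
// interleave the coordinate bits (x, y, z) into a Morton index
static Morton morton_encode(int x, int y, int z, int level) {
    Morton m = 0;
    for (int b = 0; b < level; ++b) {
        m |= ((Morton)((x >> b) & 1) << (3 * b + 2))
           | ((Morton)((y >> b) & 1) << (3 * b + 1))
           | ((Morton)((z >> b) & 1) << (3 * b));
    }
    return m;
}
// Morton indices of the up-to-26 leaves adjacent to leaf (x, y, z)
std::vector<Morton> leaf_neighbors(int x, int y, int z, int level) {
    const int side = 1 << level;   // number of cells per side at this level
    std::vector<Morton> result;
    for (int dx = -1; dx <= 1; ++dx)
        for (int dy = -1; dy <= 1; ++dy)
            for (int dz = -1; dz <= 1; ++dz) {
                if (dx == 0 && dy == 0 && dz == 0) continue;
                const int nx = x + dx, ny = y + dy, nz = z + dz;
                if (nx < 0 || nx >= side || ny < 0 || ny >= side
                    || nz < 0 || nz >= side) continue;  // outside the box
                result.push_back(morton_encode(nx, ny, nz, level));
            }
    return result;
}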
/**
* IDEA: the post-processing of the P2P could be factored with the
* one done here IDEA
*
* This function computes the leaves needed for the M2L operation
* We take every leaf of the tree, get its parent, get the neighbors of
@@ -608,63 +608,35 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
auto* containers = tree.getCellGroup(level,idxGroup);
MortonIndex curr_m_idx;
// +1 so that the test passes on the first try
MortonIndex last_m_idx = (containers->getEndingIndex() >> dim)+1;
for(int leafIdx = 0;
leafIdx < containers->getNumberOfCellsInBlock();
++leafIdx){
// Getting the current morton index
curr_m_idx = containers->getCellMortonIndex(leafIdx);
// Compute the morton index of the father
curr_m_idx = curr_m_idx >> dim;
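// (e.g. with dim == 3 the father of Morton index 42 = 0b101010 is
//  42 >> 3 == 5 = 0b101; the children of 5 span [5 << 3, ((5+1) << 3) - 1] = [40, 47])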
// If it's a new father
if(curr_m_idx != last_m_idx){
last_m_idx = curr_m_idx;
// Compute coordinate
MortonIndex interactionsIndexes[216];
int interactionsPosition[216];
FTreeCoordinate coord(curr_m_idx);
// Getting the neighbors of the father
int counter = coord.getInteractionNeighbors(level,interactionsIndexes,interactionsPosition);
for(int idxNeighbor = 0 ; idxNeighbor < counter ; ++idxNeighbor){
if( idxNeighbor >= global_min_m_idx
&& idxNeighbor <= global_max_m_idx)
{
if(idxNeighbor < local_min_m_idx ||
idxNeighbor > local_max_m_idx){
// Store the leaf
if(idx_vector > external_leaf.size()){
std::cout << "ERROR " << std::endl;
}
external_leaf[idx_vector] = idxNeighbor;
++idx_vector;
// Compute coordinate
MortonIndex interactionsIndexes[216];
int interactionsPosition[216];
FTreeCoordinate coord(curr_m_idx);
// Getting the neighbors of the father
int counter = coord.getInteractionNeighbors(level,interactionsIndexes,interactionsPosition);
for(int idxNeighbor = 0 ; idxNeighbor < counter ; ++idxNeighbor){
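// (the fix: the tests below use the neighbor's Morton index,
//  where the old code above compared the loop counter idxNeighbor)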
MortonIndex tmp = interactionsIndexes[idxNeighbor];
if( tmp >= global_min_m_idx
&& tmp <= global_max_m_idx)
{
if(tmp < local_min_m_idx ||
tmp > local_max_m_idx){
// Store the leaf
if(idx_vector > external_leaf.size()){
std::cout << "ERROR " << std::endl;
}
external_leaf[idx_vector] = tmp;
++idx_vector;
}
/*
// Compute the first and the last child
MortonIndex first_child =
interactionsIndexes[idxNeighbor] << dim;
//std::cout << interactionsIndexes[idxNeighbor] << std::endl;
MortonIndex last_child =
((interactionsIndexes[idxNeighbor]+1) << dim)-1;
// Add child if needed
for(MortonIndex idx_Child = first_child ; idx_Child <= last_child ; ++idx_Child){
if( idx_Child >= global_min_m_idx
&& idx_Child <= global_max_m_idx)
{
if(idx_Child < local_min_m_idx ||
idx_Child > local_max_m_idx){
// Store the leaf
if(idx_vector > external_leaf.size()){
std::cout << "ERROR " << std::endl;
}
external_leaf[idx_vector] = idx_Child;
++idx_vector;
}
}
} // end for child*/
} // end for neighbors
} // end if
}
} // end for neighbors
} // end for leaf
} // end for group
// if we have leaves in the vector
@@ -672,6 +644,7 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
//sort the result
FQuickSort<MortonIndex>::QsSequential(external_leaf.data(),idx_vector);
// compute the number of leaves
std::size_t nb_leaf = 1;
MortonIndex last_leaf = external_leaf[0];
for(unsigned i = 1 ; i < idx_vector ; ++i){
@@ -705,7 +678,6 @@ std::vector<MortonIndex> get_leaf_M2L_interaction_at_level(
}
/**
* The goal of this function is to concatenate the two vectors
* and erase duplicate data (a sketch follows below)
@@ -853,52 +825,53 @@ std::vector<std::size_t> get_matrix_interaction(
return {begin(global_matrix_interaction),end(global_matrix_interaction)};
}
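A minimal sketch of the concatenation described above with plain STL calls, assuming both input vectors are already sorted (illustrative only, not the actual implementation):
#include <algorithm>
#include <vector>
using Morton = long long;  // stand-in for MortonIndex
std::vector<Morton> concat_unique(const std::vector<Morton>& a,
                                  const std::vector<Morton>& b) {
    std::vector<Morton> out(a.size() + b.size());
    // merge the two sorted inputs, then drop adjacent duplicates
    std::merge(a.begin(), a.end(), b.begin(), b.end(), out.begin());
    out.erase(std::unique(out.begin(), out.end()), out.end());
    return out;
}
// e.g. {3, 5, 8} and {5, 9} give {3, 5, 8, 9}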
/**
* This function computes the number of blocks needed to send all the
* leaves stored in leaf_needed.
* It returns a vector with the indices of all blocks needed by the proc
* @author benjamin.dufoyer@inria.fr
* @param tree GroupTree
* @param leaf_needed Vector where the leaves are stored
* @return Vector with all block idx
*/
template<class GroupOctreeClass>
std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
/**
* This function computes the number of blocks needed to send all the
* leaves stored in leaf_needed.
* It returns a vector with the indices of all blocks needed by the proc
* @author benjamin.dufoyer@inria.fr
* @param tree GroupTree
* @param leaf_needed Vector where the leaves are stored
* @return Vector with all block idx
*/
template<class GroupOctreeClass>
std::vector<MortonIndex> get_nb_block_from_leaf(GroupOctreeClass& tree,
MortonIndex* leaf_needed,
std::size_t nb_leaf)
{
std::vector<MortonIndex> block_to_send(nb_leaf,0);
int idx_vect = 0 ;
// iterate over every leaf
for(unsigned i = 0 ; i < nb_leaf; ++i){
bool leaf_ok = false;
// iterate over every block of particles
for(unsigned idxGroup = 0 ; idxGroup < (unsigned)tree.getNbParticleGroup() ; ++idxGroup){
if(leaf_ok)
break;
auto* containers = tree.getParticleGroup(idxGroup);
for(unsigned idx_leaf_block = 0; idx_leaf_block < (unsigned)containers->getNumberOfLeavesInBlock() ; ++idx_leaf_block){
if(leaf_ok)
break;
MortonIndex curr_m_idx = containers->getLeafMortonIndex(idx_leaf_block);
if(leaf_needed[i] == (unsigned)curr_m_idx){
if(idx_vect == 0){
block_to_send[idx_vect] = idxGroup;
++idx_vect;
leaf_ok = true;
} else if(block_to_send[idx_vect-1] != idxGroup){
block_to_send[idx_vect] = idxGroup;
++idx_vect;
leaf_ok = true;
}
}
}
}
}
return {block_to_send.begin(),block_to_send.begin()+idx_vect};
}
{
std::vector<MortonIndex> block_to_send(tree.getNbParticleGroup(),0);
// declaration of idx variables
unsigned idx_vector = 0;
unsigned idx_leaf = 0;
// iterate on every group
for(int idx_group = 0 ; idx_group < tree.getNbParticleGroup() ; ++idx_group){
if(idx_leaf == nb_leaf)
break;
// get the current block
auto* container = tree.getParticleGroup(idx_group);
// get first leaf in this interval
while( container->getStartingIndex() > leaf_needed[idx_leaf] && idx_leaf < nb_leaf ){
++idx_leaf;
}
if(idx_leaf == nb_leaf)
break;
while( container->getEndingIndex() < leaf_needed[idx_leaf] &&
idx_leaf < nb_leaf){
// if the leaf exists, keep it
if(container->exists(leaf_needed[idx_leaf])){
block_to_send[idx_vector] = idx_group;
++idx_vector;
++idx_leaf;
break;
}
++idx_leaf;
}
if(idx_leaf == nb_leaf)
break;
}
return {block_to_send.begin(),block_to_send.begin()+idx_vector};
}
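The rewritten body above replaces the old per-leaf scan with a single merge-style sweep: the requested leaves are sorted and the particle groups cover increasing Morton intervals, so each side is traversed only once. A generic sketch of the pattern (hypothetical Interval type, and without the per-leaf exists() check that handles holes inside a block):
#include <cstddef>
#include <vector>
struct Interval { long long start, end; };  // half-open [start, end)
// indices of the intervals that contain at least one sorted query
std::vector<std::size_t> blocks_containing(
        const std::vector<Interval>& blocks,      // sorted, disjoint
        const std::vector<long long>& queries) {  // sorted ascending
    std::vector<std::size_t> hit;
    std::size_t q = 0;
    for (std::size_t b = 0; b < blocks.size() && q < queries.size(); ++b) {
        // skip queries that fall before this block
        while (q < queries.size() && queries[q] < blocks[b].start)
            ++q;
        // record the block once if the next query lands inside it
        if (q < queries.size() && queries[q] < blocks[b].end)
            hit.push_back(b);
    }
    return hit;
}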
/*
template<class GroupOctreeClass>
std::vector<MortonIndex> get_nb_block_from_node(GroupOctreeClass& tree,
MortonIndex* node_needed,
@@ -924,9 +897,65 @@ std::vector<std::size_t> get_matrix_interaction(
}
}
return {block_to_send.begin(),block_to_send.begin()+idx_vect};
}
}*/
template<class GroupOctreeClass>
std::vector<MortonIndex> get_nb_block_from_node(GroupOctreeClass& tree,
MortonIndex* node_needed,
std::size_t nb_node,
int level,
std::vector<bool>* block_already_send)
{
int idx_vect = 0 ;
std::vector<int> block_to_send(tree.getNbCellGroupAtLevel(level),0);
unsigned idx_node = 0;
// iterate on every group
for(unsigned idx_group = 0; idx_group < (unsigned)tree.getNbCellGroupAtLevel(level) ;++idx_group){
// if the current block hasn't already been sent
if(!block_already_send->at(idx_group)){
auto* containers = tree.getCellGroup(level,idx_group);
// check if we have checked every node
if(idx_node == nb_node){
break;
}
// while the morton index of the current node is too low
while(node_needed[idx_node] < containers->getStartingIndex() &&
idx_node < nb_node){
++idx_node;
}
while(node_needed[idx_node] < containers->getEndingIndex() &&
idx_node < nb_node){
if(containers->isInside(node_needed[idx_node])){
block_to_send[idx_vect] = idx_group;
++idx_vect;
++idx_node;
break;
}
++idx_node;
}
if(idx_node == nb_node){
break;
}
}
}
return {block_to_send.begin(),block_to_send.begin()+idx_vect};
}
/*
* In this function we know the blocks needed by every proc,
* so we need to compute the parents of every block and send the
* number of parent blocks to every proc, so we have
*
* 1st Step : Compute the parent blocks
* 2nd Step : Send and receive the number of parent blocks per level
* @author benjamin.dufoyer@inria.fr
* @param nb_block_to_receiv number of blocks to receive from every proc per level
* @param list_of_block_to_send list of blocks to send for every proc per level
* @param tree local group octree
* @param conf MPI CONF
*/
template<class GroupOctreeClass>
void send_get_number_of_block_node_level(
std::vector<MortonIndex>& vect_recv,
@@ -949,7 +978,6 @@ std::vector<std::size_t> get_matrix_interaction(
bool leaf_level = (tree.getHeight()-1 == level);
std::vector<bool> block_already_send(tree.getNbCellGroupAtLevel(level),false);
// Post the receives of the number of blocks
for(unsigned i = (my_rank*nb_proc);
i < (my_rank*nb_proc)+nb_proc;
@@ -968,7 +996,6 @@ std::vector<std::size_t> get_matrix_interaction(
}
idx_proc += 1;
}
idx_proc = 0;
std::size_t idx_vect = 0;
idx_status = 0;
@@ -1318,99 +1345,7 @@ void compute_block_node_level(
}
/*
* In this function we know the blocks needed by every proc,
* so we need to compute the parents of every block and send the
* number of parent blocks to every proc, so we have
*
* 1st Step : Compute the parent blocks
* 2nd Step : Send and receive the number of parent blocks per level
* @author benjamin.dufoyer@inria.fr
* @param nb_block_to_receiv number of blocks to receive from every proc per level
* @param list_of_block_to_send list of blocks to send for every proc per level
* @param tree local group octree
* @param conf MPI CONF
*/
template<class GroupOctreeClass>
void send_get_number_of_block_node_level(
std::vector<std::vector<std::pair<int,int>>>& nb_block_to_receiv,
std::vector<std::vector<std::pair<int,std::vector<std::size_t>>>>& list_of_block_to_send,
GroupOctreeClass& tree,
const inria::mpi_config& conf
)
{
////////////////////////////////////////////////////////
// First Step : compute the number of parents per level
// In this step, we compute the number of parents and store
// the idx
////////////////////////////////////////////////////////
// in the vector the level and the index are inverted
// index 0 of the vector is the level tree.height-1 --> leaf level
// index 1 of the vector is the level tree.height-2 ..
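// more generally, vector index i corresponds to tree level tree.height-1-i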
int current_level = tree.getHeight()-2;
for(int i = 1; i < tree.getHeight()-2 ; ++i){
compute_block_node_level(list_of_block_to_send[i-1],
list_of_block_to_send[i],
current_level,
tree);
--current_level;
}
//////////////////////////////////////////////////////////////////////
// Second Step : Send and recev the number of parents block per level
//////////////////////////////////////////////////////////////////////
// computing the number of messages
int nb_message_to_receiv = 0;
// we start at 1 because 0 is the leaf level
for(unsigned i = 1 ; i < nb_block_to_receiv.size() ; ++i){
for(unsigned j = 0 ; j < nb_block_to_receiv[i].size(); ++j){
if(nb_block_to_receiv[i].size() != nb_block_to_receiv[0].size() ){
nb_block_to_receiv[i].resize(nb_block_to_receiv[0].size());
}
nb_message_to_receiv += 1;
}
}
// Posting every irecv
inria::mpi::request tab_mpi_status[nb_message_to_receiv];
int idx_tab_req = 0;
for(unsigned i = 1 ; i < nb_block_to_receiv.size() ; ++i ){
// Post the irecv
for(unsigned j = 0 ; j < nb_block_to_receiv[0].size() ; ++j){
nb_block_to_receiv[i][j].first = nb_block_to_receiv[0][j].first;
tab_mpi_status[idx_tab_req] =
conf.comm.irecv(
&nb_block_to_receiv[i][j].second,
1,
MPI_INT,
nb_block_to_receiv[i][j].first,1);
idx_tab_req++;
}
}
// Allocate buffer
std::vector<int> buffer(nb_message_to_receiv);
int idx_buffer = 0;
// Posting every isend
for(unsigned i = 1 ; i < list_of_block_to_send.size() ; ++i ){
for(unsigned j = 0 ; j < list_of_block_to_send[i].size() ; ++j ){
// TODO check the index
buffer[idx_buffer] =
(int)list_of_block_to_send[i][j].second.size();
conf.comm.isend(
&buffer.data()[idx_buffer],
1,
MPI_INT,
list_of_block_to_send[i][j].first,1);
idx_buffer++;
}
}
// Wait for all MPI Request
inria::mpi::request::waitall(nb_message_to_receiv,tab_mpi_status);
}
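The exchange above is the classic post-all-irecvs-then-isends pattern. A minimal self-contained sketch with the raw MPI C API (dummy payloads, not the inria::mpi wrapper used here):
#include <mpi.h>
#include <vector>
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0, nb_proc = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);
    // every rank tells every other rank one integer, standing in for
    // the "number of parent blocks" exchanged per level above
    std::vector<int> to_recv(nb_proc, 0);
    std::vector<int> to_send(nb_proc, rank);  // dummy payload
    std::vector<MPI_Request> requests;
    for (int p = 0; p < nb_proc; ++p) {       // post every irecv first
        if (p == rank) continue;
        requests.emplace_back();
        MPI_Irecv(&to_recv[p], 1, MPI_INT, p, 1, MPI_COMM_WORLD, &requests.back());
    }
    for (int p = 0; p < nb_proc; ++p) {       // then post every isend
        if (p == rank) continue;
        requests.emplace_back();
        MPI_Isend(&to_send[p], 1, MPI_INT, p, 1, MPI_COMM_WORLD, &requests.back());
    }
    // waiting on the sends as well keeps the send buffers valid until delivery
    MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
    MPI_Finalize();
    return 0;
}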
template<class GroupOctreeClass>
@@ -1474,7 +1409,6 @@ std::vector<std::vector<block_t>> send_get_symbolic_block_at_level(
conf);
return exchange_block(
nb_block_to_receiv,
list_of_block_to_send,
......