Commit d370006e authored by COULAUD Olivier

start building the let

parent 126c806c
@@ -752,11 +752,159 @@ namespace scalfmm::tree
}
#endif
}
///
/// \brief compute the leaf Morton indices involved in the P2P operators
/// (direct pass) that lie outside the local distribution.
///
/// Currently a stub that returns an empty vector.
///
template<typename OctreeTree, typename VectorMortonIdx, typename MortonDistribution>
inline VectorMortonIdx get_p2p_interaction(parallel_manager& para, OctreeTree& tree,
const VectorMortonIdx& local_morton_idx,
const MortonDistribution& leaves_distrib)
{
VectorMortonIdx outsideIndex_to_add;
return outsideIndex_to_add;
}
///
/// \brief compute the cell Morton indices involved in the M2L operators
/// (transfer pass) at the given level that lie outside the local distribution.
///
/// Currently a stub that returns an empty vector.
///
template<typename OctreeTree, typename VectorMortonIdx, typename MortonDistribution>
inline VectorMortonIdx get_m2l_interaction_at_level(parallel_manager& para, OctreeTree& tree,
const VectorMortonIdx& local_morton_idx,
const MortonDistribution& cell_distrib, const int& level)
{
VectorMortonIdx outsideIndex_to_add;
return outsideIndex_to_add;
}
///
/// \brief merge two sorted vectors
///
/// Each element appears only once in the result.
///
/// \param[in] v1 first vector to merge
/// \param[in] v2 second vector to merge
///
/// \return the merged vector
///
template<typename VectorMortonIdx>
inline VectorMortonIdx merge_unique(VectorMortonIdx& v1, const VectorMortonIdx& v2)
{
VectorMortonIdx dst;
std::merge(v1.begin(), v1.end(), v2.begin(), v2.end(), std::back_inserter(dst));
auto last = std::unique(dst.begin(), dst.end());
dst.erase(last, dst.end());
return dst;
}
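// Usage sketch (hypothetical values, assuming VectorMortonIdx behaves like a
// sorted std::vector of Morton indices):
//   VectorMortonIdx a{1, 3, 5};
//   VectorMortonIdx b{3, 4};
//   auto merged = merge_unique(a, b);   // merged == {1, 3, 4, 5}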
} // namespace distrib
namespace let
{
///
/// \brief construct the local essential tree (LET) at the given level.
///
/// Starting from a given Morton index distribution, we compute all the
/// interactions needed in the algorithm steps.
/// At the leaf level this corresponds to the interactions coming from the
/// direct pass (P2P operators)
/// and from the transfer pass (M2L operator). For the other levels we
/// consider only the M2L interactions.
/// The leaves_distrib and the cells_distrib might be different.
/// At the end the let also has all the interaction lists computed.
///
/// \param[inout] para the parallel manager.
/// \param[inout] tree the tree on which to compute the let.
/// \param[in] local_morton_idx the morton indices of the particles on the
/// processor.
///
/// \param[in] leaves_distrib the morton index distribution for the
/// particles/leaves.
///
/// \param[in] cells_distrib the morton index distribution for
/// the cells at the leaf level.
///
/// \param[in] level the level at which to construct the let
///
template<typename OctreeTree, typename VectorMortonIdx, typename MortonDistribution>
void build_let_at_level(parallel_manager& para, OctreeTree& tree, const VectorMortonIdx& local_morton_idx,
const MortonDistribution& leaves_distrib, const MortonDistribution& cells_distrib,
int& level)
{
// store whether we are at the leaf level
bool leaf_level = (tree.leaf_level() == level);
// update the morton index
// if(!leaf_level)
// {
// gmin = gmin >> 3;
// gmax = gmax >> 3;
// }
// const MortonIndex global_min_m_idx = gmin;
// const MortonIndex global_max_m_idx = gmax;
// // Compute min and max local morton index at the level
// needed if(this->getNbCellGroupAtLevel(level) > 0)
// {
// lmin = this->getCellGroup(level, 0)->getStartingIndex();
// lmax = this->getCellGroup(level,
// this->getNbCellGroupAtLevel(level) -
// 1)->getEndingIndex()
// - 1;
// }
// else
// {
// lmin = -1;
// lmax = -1;
// }
// const MortonIndex local_min_m_idx = lmin;
// const MortonIndex local_max_m_idx = lmax;
// declare the variable here because it is filled inside the if/else below
VectorMortonIdx needed_idx;
if(leaf_level)
{
// we compute the cells needed in the M2L operators
auto m2l_idx = distrib::get_m2l_interaction_at_level(para, tree, local_morton_idx, cells_distrib, level);
// we compute the cells needed in the P2P operators
auto p2p_idx = distrib::get_p2p_interaction(para, tree, local_morton_idx, leaves_distrib);
// we merge the two contributions
needed_idx = distrib::merge_unique(m2l_idx, p2p_idx);
}
else
{
// we compute the cells needed in the M2L operators
needed_idx = distrib::get_m2l_interaction_at_level(para, tree, local_morton_idx, cells_distrib, level);
}
#ifdef TT
std::vector<std::pair<MortonIndex, MortonIndex>> index_particle_distribution =
group_linear_tree.get_index_particle_distribution();
// Get the interaction matrix
// matrix[2][nproc]
// first line for Morton index to Send
// second line for Morton index to Recv
std::vector<std::vector<size_t>> global_matrix_interaction = dstr_grp_tree_builder::get_matrix_interaction(
needed_leaf, index_particle_distribution, group_linear_tree.get_mpi_conf());
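// Example (hypothetical, 3 processes): global_matrix_interaction[0] == {0, 5, 2}
// would mean this rank sends 0, 5 and 2 Morton indices to ranks 0, 1 and 2,
// while global_matrix_interaction[1] holds, rank by rank, the counts to receive.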
// Send and get leaf
// Auto is used to get the block more easily
// it's a std::pair<std::vector<cell_symbolic_block>, std::vector<particle_symbolic_block>>
// block_t is a struct defined in FDistributedGroupTreeBuilder.hpp
auto let_block = dstr_grp_tree_builder::send_get_symbolic_block_at_level(
needed_leaf, global_matrix_interaction, *this, level, group_linear_tree.get_mpi_conf());
// free needed leaf
std::vector<MortonIndex>().swap(needed_leaf);
// free interaction matrix
std::vector<std::vector<size_t>>().swap(global_matrix_interaction);
// add the LET block to the tree
tree->add_LET_block(let_block, level);
#endif
tree.insert_component_at_level(level, needed_idx);
}
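// Usage sketch (an assumption, not yet wired in): the let is built level by
// level, from the leaves up to level 2, the first level with M2L interactions
// in an octree:
//   for(int level = tree.leaf_level(); level >= 2; --level)
//   {
//       build_let_at_level(para, tree, local_morton_idx, leaves_distrib,
//                          cells_distrib, level);
//   }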
//
// @param[in] mpi_comm the MPI communicator
// @param[in] manager the parallel manager
// @param[inout] myParticles local array of particles on my node. On output the
// array is sorted
// @param[in] number_of_particles total number of particles in the simulation
@@ -856,6 +1004,7 @@ namespace scalfmm::tree
leafMortonIdx.resize(particle_container.size());
// As the particles are sorted the leafMortonIdx is sorted too
/// leafMortonIdx is a linear tree
#pragma omp parallel for shared(localNumberOfParticles, box, leaf_level)
for(std::size_t part = 0; part < particle_container.size(); ++part)
{
@@ -866,12 +1015,7 @@ namespace scalfmm::tree
// << std::endl;
}
out::print("rank(" + std::to_string(rank) + ") leafMortonIdx: ", leafMortonIdx);
///
/// The let (local essential tree) is constructed below, once the local
/// tree is built, according to the particle and cell distributions.
///
///
/// construct the local tree based on the let
///
@@ -884,70 +1028,13 @@ namespace scalfmm::tree
}
localGroupTree = new Tree_type(static_cast<std::size_t>(leaf_level + 1), order, box,
static_cast<std::size_t>(groupSizeCells), container);
/// return the tree
///
#ifdef TT
// But we have lost the link between particles and the distribution
// create the linear tree
// a linear tree is a tree, with only the leaf
// auto linear_tree =
// scalfmm::tree::linear::create_balanced_linear_tree_at_level(manager.get_communicator(),
// tuple_of_indexes);
// inria::linear_tree::create_balanced_linear_tree_at_leaf(manager.get_communicator(),
// level, box,
// tuple_of_indexes);
// Now we have a balanced tree (leaf per processor) according to the morton index
// std::cout << "Linear tree \n";
// if(manager.io_master())
// {
// for(auto& e: linear_tree)
// {
// std::cout << e[0] << " " << e[1] <<
// std::endl;
// }
// }
// auto start = linear_tree.front()[0], end = linear_tree.back()[0];
// std::cout << "Local size " << linear_tree.size() << " " << start << " " << end << " "
// << tuple_of_indexes.front()[0] << " " << tuple_of_indexes.back()[0] <<
// std::endl;
// // Now we have to redistribute the particles
//
// int nbPart = 0;
// for(std::size_t part = 0; part < particle_container.number_elements(); ++part)
// {
// auto ind = scalfmm::index::get_morton_index(particle_container.position(part), box,
// level); if(manager.io_master())
// {
// std::cout << " start " << ind << " ind " << start << " end " << end << std::endl;
// }
// if(ind >= start && ind <= end)
// {
// ++nbPart;
// }
// }
// std::cout << "Number of particles: " << nbPart << std::endl;
///
/// The particles are distributed so we can construct the octree as in sequential case
///
/// TODO
// // create GroupLinearTree
// scalfmm::tree::linear::FGroupLinearTree<typename decltype(linear_tree)::value_type,
// Index_type>
// group_linear_tree{manager.get_communicator()};
// group_linear_tree.create_local_group_linear_tree(&linear_tree, groupSize);
// std::cout << "%%%%%%%%%% " << group_linear_tree << std::endl;
// m_idx_distribution = group_linear_tree.get_index_particle_distribution_implicit();
/// Now we construct the Let by adding the cells used in the different operators (P2P, M2M, M2L, L2L)
///
/// TODO
/// Construct the let according to the particle and cell distributions
///
/// The Local essential tree (Let) is built
//
#endif
if(manager.get_num_processes() > 1)
{
build_let_at_level(manager, *localGroupTree, leafMortonIdx, particles_distrib, cells_distrib, leaf_level);
}
}
} // namespace let
@@ -385,7 +385,23 @@ namespace scalfmm::component
--cell_level_it;
}
}
///
/// \brief insert the components associated to the morton indices at the given level in the tree
///
/// \param level level in the tree at which to add the components
/// \param component_morton_index morton indices of the components to add
///
template<typename VectorMortonIdx>
inline auto insert_component_at_level(const int& level, const VectorMortonIdx& component_morton_index)
{
std::cerr << " insert_component_at_level not yet implemented" << std::endl;
}
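// Usage sketch: build_let_at_level (above) calls this once the needed Morton
// indices for a level are known:
//   tree.insert_component_at_level(level, needed_idx);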
///
/// \brief trace
/// \param level_trace
///
inline auto trace(std::size_t level_trace = 0) -> void
{
auto level_0 = []() {};