Commit 3e969814 authored by COULAUD Olivier

Remove some outputs

parent 47ad9e4f
@@ -33,7 +33,7 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
{
parallel_manager para;
para.init();
#ifndef LET
constexpr int dimension = 2;
constexpr int nb_inputs_near = 1;
constexpr int nb_outputs_near = 1;
@@ -143,11 +143,10 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
scalfmm::tree::let::buildLetTree(para, number_of_particles, particles_set, box, leaf_level, group_size, group_size,
letGroupTree, mortonCellDistribution, nb_block, order);
// scalfmm::tree::print("rank(" + std::to_string(rank) + ") data distrib:
// ", mortonCellDistribution);
#ifdef SCALFMM_USE_MPI
para.get_communicator().barrier();
std::this_thread::sleep_for(std::chrono::seconds(5));
std::this_thread::sleep_for(std::chrono::seconds(2));
#endif
///
@@ -155,94 +154,16 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// Save the data
// auto nbDataPerRecord = nb_val_to_red_per_part;
// scalfmm::tools::DistFmaGenericWriter<value_type> writer(output_file, para);
// std::cout << "number_of_particles " << number_of_particles << std::endl;
// writer.writeHeader(centre, width, number_of_particles, sizeof(value_type), nbDataPerRecord, dimension,
// loader.get_number_of_input_per_record());
///
/// writer.write( .... todo)
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
#else
// MPI communicator
// auto world = inria::mpi::communicator::world();
inria::mpi_config conf(para.get_communicator());
const auto rank = conf.comm.rank();
const int N = 30;
// Input starts empty; it is resized to this rank's block size just below
std::vector<int> input{};
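// Each rank takes N/3 elements; the last rank takes the remainder so that the
// block sizes sum to N (the example assumes 3 processes).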
int block = rank == para.get_num_processes() - 1 ? N - int(N / 3) * rank : int(N / 3);
input.resize(block);
std::cout << "block: " << block << std::endl;
std::iota(begin(input), end(input), block * rank);
std::cout << " proc " << conf.comm.rank() << " in (" << input.size() << "): ";
for(auto& v: input)
{
std::cout << v << " ";
}
std::cout << std::endl;
std::array<int, 3> proportions{100, 200, 300};
int total = std::accumulate(proportions.begin(), proportions.end(), 0);
int N_local = N;
// int(N * proportions[rank] / total + 1);
// All processes have a large enough buffer
std::vector<int> output;
output.resize(N_local);
int none_value = 0;   // sentinel marking unused output slots (fragile: 0 is also a valid input value)
try
{
std::array<int, 3> proportions{100, 200, 300};
// Distribution object: element i goes to the first rank k such that i <= mapping[k]
struct dist
{
std::vector<int> mapping{15, 20, 30};
int operator()(const int& i)
{
for(std::size_t k = 0; k < mapping.size(); ++k)
{
if(i <= mapping[k])
{
return static_cast<int>(k);
}
}
return static_cast<int>(mapping.size()) - 1;   // clamp values beyond the last threshold
}
};
inria::proportional_distribution<> distrib(conf, input, proportions);
// inria::distribute(conf, input, output, distrib);
inria::distribute(conf, input, output, dist{});
// inria::distribute(conf, begin(input), end(input), begin(output),
// end(output), distrib);
}
catch(std::out_of_range& e)
{
std::cerr << e.what() << '\n';
}
std::cout << " output.size() " << output.size() << std::endl;
// Truncate the output at the first sentinel value (unused slots hold none_value)
N_local = static_cast<int>(output.size());
for(std::size_t i = 1; i < output.size(); ++i)
{
if(output[i] == none_value)
{
N_local = static_cast<int>(i);
break;
}
}
output.resize(N_local);
std::cout << " proc " << rank << " out (" << output.size() << "): ";
for(auto& v: output)
{
std::cout << v << " ";
}
std::cout << std::endl;
// auto nbDataPerRecord = nb_val_to_red_per_part;
// scalfmm::tools::DistFmaGenericWriter<value_type> writer(output_file, para);
// std::cout << "number_of_particles " << number_of_particles << std::endl;
// writer.writeHeader(centre, width, number_of_particles, sizeof(value_type), nbDataPerRecord, dimension,
// loader.get_number_of_input_per_record());
///
/// writer.write( .... todo)
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
#endif
delete letGroupTree;
para.end();
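// For reference, a minimal sketch of the distribution-functor contract used by
// inria::distribute above: a callable mapping an element to its destination
// rank. The thresholds below are illustrative, not part of this commit.
//
// struct threshold_dist
// {
//     std::array<int, 3> upper{9, 19, 29};   // rank k owns values <= upper[k]
//     int operator()(int value) const
//     {
//         for(std::size_t k = 0; k < upper.size(); ++k)
//         {
//             if(value <= upper[k]) { return static_cast<int>(k); }
//         }
//         return static_cast<int>(upper.size()) - 1;   // clamp larger values
//     }
// };
// usage: inria::distribute(conf, input, output, threshold_dist{});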
......
@@ -258,13 +258,24 @@ namespace scalfmm::container
meta::sub_tuple(*it, typename particle_type::range_variables_type{}) = p;
}
/// \brief reset the outputs in the container
///
inline auto reset_outputs() -> void {}
inline auto reset_outputs() -> void
{
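// Walk the container once; view each particle through a proxy
// and zero every one of its output fields.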
using value_type = typename Particle::outputs_value_type;
auto it = std::begin(*this);
for(std::size_t i{0}; i < this->size(); ++i)
{
auto proxy = proxy_type(*it);
for(std::size_t ii{0}; ii < proxy.sizeof_outputs(); ++ii)
{
proxy.outputs(ii) = value_type(0.0);
}
++it;
}
}
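// Usage sketch (hypothetical caller; only reset_outputs() comes from this commit):
//   container.reset_outputs();   // all particle outputs are now value_type(0)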
/// @brief
///
/// @return
......
@@ -1004,7 +1004,7 @@ namespace scalfmm::tree
// {
// std::cout << p << " ";
// }
std::cout << std::endl;
// std::cout << std::endl;
for(int idx = 0; idx < cell_symbolics.number_of_neighbors; ++idx)
{
auto& p = index[idx];
@@ -1127,7 +1127,6 @@ namespace scalfmm::tree
using mortonIdx_type = typename VectorMortonIdx::value_type;
auto nb_proc = para.get_num_processes();
auto rank = para.get_process_id();
std::cout << "check_if_morton_index_exist in progress \n";
std::vector<int> nb_messages_to_send(nb_proc, 0);
std::vector<int> nb_messages_to_receive(nb_proc, 0);
// beginning index to send to process k
@@ -1150,18 +1149,15 @@ namespace scalfmm::tree
}
}
start[rank] = start[rank + 1];
para.get_communicator().barrier();
// out::print("rank(" + std::to_string(rank) + ") local_idx : ", local_morton_idx);
// out::print("rank(" + std::to_string(rank) + ") start : ", start);
out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
// out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
// out::print("rank(" + std::to_string(rank) + ") Nb msg send : ", nb_messages_to_send);
// exchange the vector all-to-all so that each process knows which
// processes will send it a message
auto comm = para.get_communicator();
auto mpi_type = inria::mpi::get_datatype<int>();
comm.barrier();
std::cout << "================================================\n";
comm.alltoall(nb_messages_to_send.data(), 1, mpi_type, nb_messages_to_receive.data(), 1, mpi_type);
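// After the alltoall, nb_messages_to_receive[k] holds the number of morton
// indices that process k will send to us, so matching receives can be posted.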
// out::print("rank(" + std::to_string(rank) + ") Nb msg receiv : ",
// nb_messages_to_receive);
@@ -1210,7 +1206,7 @@ namespace scalfmm::tree
// {
// std::cout << elt[ki] << " ";
// }
std::cout << std::endl;
// std::cout << std::endl;
std::size_t start = 0;
for(int ki = 0; ki < nb_messages_to_receive[i]; ++ki)
{
@@ -1231,7 +1227,7 @@ namespace scalfmm::tree
// {
// std::cout << elt[ki] << " ";
// }
std::cout << std::endl;
// std::cout << std::endl;
}
}
}
@@ -1257,7 +1253,7 @@ namespace scalfmm::tree
comm.irecv(&(needed_idx[start[i]]), nb_messages_to_send[i], mpi_type, i, 300));
}
}
out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
// out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
for(std::size_t i = 0; i < nb_messages_to_receive.size(); ++i)
{
@@ -1273,7 +1269,7 @@ namespace scalfmm::tree
last = last - 1;
}
needed_idx.erase(last, needed_idx.end());
out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
// out::print("rank(" + std::to_string(rank) + ") needed_idx : ", needed_idx);
std::cout << scalfmm::colors::green << " --> End distrib::check_if_morton_index_exist "
<< scalfmm::colors::reset << std::endl;
}
@@ -1318,51 +1314,49 @@ namespace scalfmm::tree
// store whether we are at the leaf level
bool leaf_level = (tree.leaf_level() == level);
VectorMortonIdx needed_idx;
if(leaf_level)
{
// we compute the cells needed in the M2L operators
auto m2l_idx = std::move(distrib::get_m2l_interaction_at_level(para, tree, local_morton_idx,
cells_distrib, tree.leaf_level()));
// we compute the cells needed in the P2P operators
auto p2p_idx = std::move(distrib::get_p2p_interaction(para, tree, local_morton_idx, leaves_distrib));
// we merge the two contributions
// std::cout << "rank(" << my_rank << ") m2l_idx ";
// for(auto p: m2l_idx)
// {
// std::cout << p << " ";
// }
std::cout << std::endl;
std::cout << "rank(" << my_rank << ") p2p_idx ";
for(auto p: p2p_idx)
{
std::cout << p << " ";
}
std::cout << std::endl;
needed_idx = distrib::merge_unique(m2l_idx, p2p_idx);
}
else
{
// we compute the cells needed in the M2L operators
needed_idx =
std::move(distrib::get_m2l_interaction_at_level(para, tree, local_morton_idx, cells_distrib, level));
}
std::cout << "rank(" << my_rank << ") needed_idx ";
// we compute the cells needed in the M2L operators
auto needed_idx =
std::move(distrib::get_m2l_interaction_at_level(para, tree, local_morton_idx, cells_distrib, level));
std::cout << "rank(" << my_rank << ") needed_idx(m2l) ";
for(auto p: needed_idx)
{
std::cout << p << " ";
}
std::cout << std::endl;
/// Check whether the morton indices really exist in the distributed tree
para.get_communicator().barrier();
std::cout << " End group_let()\n";
distrib::check_if_morton_index_exist(para, needed_idx, cells_distrib, local_morton_idx);
// para.get_communicator().barrier();
///
tree.insert_component_at_level(level, needed_idx);
tree.insert_cells_at_level(level, needed_idx);
std::cout << scalfmm::colors::green << " --> End let::build_let_at_level() at level = " << level
<< scalfmm::colors::reset << std::endl;
}
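// Note: build_let_at_level() now collects only the M2L (cell) interactions;
// the P2P (leaf) interactions are handled by build_let_leaves() below.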
template<typename OctreeTree, typename VectorMortonIdx, typename MortonDistribution>
void build_let_leaves(parallel_manager& para, OctreeTree& tree, const VectorMortonIdx& local_morton_idx,
const MortonDistribution& leaves_distrib, const MortonDistribution& cells_distrib)
{
std::cout << scalfmm::colors::green << " --> Begin let::build_let_leaves() " << scalfmm::colors::reset
<< std::endl;
auto my_rank = para.get_process_id();
// we compute the cells needed in the P2P operators
auto needed_idx = std::move(distrib::get_p2p_interaction(para, tree, local_morton_idx, leaves_distrib));
std::cout << "rank(" << my_rank << ") needed_idx(p2p) ";
for(auto p: needed_idx)
{
std::cout << p << " ";
}
std::cout << std::endl;
/// Check whether the morton indices really exist in the distributed tree
distrib::check_if_morton_index_exist(para, needed_idx, cells_distrib, local_morton_idx);
///
tree.insert_leaves(needed_idx);
std::cout << scalfmm::colors::green << " --> End let::build_let_leaves()" << scalfmm::colors::reset
<< std::endl;
}
//
// @param[in] manager the parallel manager
// @param[inout] myParticles local array of particles on my node. On output the
@@ -1432,7 +1426,7 @@ namespace scalfmm::tree
/// Now i have all of my particles in a vector, they all have a morton index
///
////////////////////////////////////////////////////////////////////////////////////////////
//// Construct a uniform distribution for the cells at the leaves level
//// Construct a uniform distribution for the leaves/cells at the leaves level
///
/// A morton index should be owned by only one process
///
@@ -1440,30 +1434,40 @@ namespace scalfmm::tree
///
/// Build a uniform distribution of the leaves/cells
///
auto cells_distrib = std::move(scalfmm::tree::distrib::balanced_leaves(manager, leafMortonIdx));
auto leaves_distrib = std::move(scalfmm::tree::distrib::balanced_leaves(manager, leafMortonIdx));
//// End
////////////////////////////////////////////////////////////////////////////////////////////
///
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//// Construct a uniform distribution for the leaves level (particles)
//// Construct a uniform distribution for the particles
/// On each process we have the same number of particles. The number of leaves might differ significantly
///
/// A morton index should be owned by only one process
///
auto particles_distrib = std::move(
scalfmm::tree::distrib::balanced_particles(manager, particle_container, tmp, number_of_particles));
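/// (For example, with particles clustered in one corner of the box, a process
/// may own many particles but only a few leaves, so the two distributions differ.)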
/// Add the leaves/cells used in the algorithm + Let
distrib::fit_particles_in_distrib(manager, particle_container, tmp, cells_distrib, box, leaf_level,
number_of_particles);
/// Check the new distribution
//// End
////////////////////////////////////////////////////////////////////////////////////////////
///
out::print("rank(" + std::to_string(rank) + ") cells_distrib: ", cells_distrib);
/// Check the two distributions
///
out::print("rank(" + std::to_string(rank) + ") cells_distrib: ", leaves_distrib);
out::print("rank(" + std::to_string(rank) + ") particles_distrib: ", particles_distrib);
////////////////////////////////////////////////////////////////////////////////////////////
/// Set the particles on the good process according to the computed distribution
///
distrib::fit_particles_in_distrib(manager, particle_container, tmp, leaves_distrib, box, leaf_level,
number_of_particles);
////////////////////////////////////////////////////////////////////////////////////////////
/// Compute the new morton index associated to the particles
/// on the process
///
/// We now consider the leaves_distrib!
///
std::cout << scalfmm::colors::red;
out::print(" --> We consider leaves_distrib to construct the let: ", leaves_distrib);
std::cout << scalfmm::colors::reset << std::endl;
leafMortonIdx.resize(particle_container.size());
// As the particles are sorted the leafMortonIdx is sorted too
/// leafMortonIdx is a linear tree
@@ -1472,37 +1476,63 @@ namespace scalfmm::tree
{
leafMortonIdx[part] =
scalfmm::index::get_morton_index(particle_container[part].position(), box, leaf_level);
// std::cout << rank << " " << part << " " << particle_container[part] << " " <<
// leafMortonIdx[part]
// << std::endl;
}
auto last = std::unique(leafMortonIdx.begin(), leafMortonIdx.end());
leafMortonIdx.erase(last, leafMortonIdx.end());
out::print("rank(" + std::to_string(rank) + ") leafMortonIdx: ", leafMortonIdx);
////////////////////////////////////////////////////////////////////////////////////////////
///
/// construct the local tree based on the let
/// Construct the local tree based on our set of particles
///
/// Put the particles inside the container of particles (for compatibility)
///
// just for compatibility
using container_type = scalfmm::container::particle_container<typename Vector_type::value_type>;
container_type container(particle_container.size());
for(std::size_t i = 0; i < particle_container.size(); ++i)
{
container.push_particle(i, particle_container[i]);
}
/// Set true because the particles are already sorted
/// In fact we already have all the leaves in leafMortonIdx - it could be used
/// to construct the tree!
///
localGroupTree = new Tree_type(static_cast<std::size_t>(leaf_level + 1), order, box,
static_cast<std::size_t>(groupSizeCells), container);
static_cast<std::size_t>(groupSizeCells), container, true);
///
/// Construct the let according to the distributions particles and
/// cells
////////////////////////////////////////////////////////////////////////////////////////////
///
if(manager.get_num_processes() > 1)
{
using distrib_type = decltype(cells_distrib);
////////////////////////////////////////////////////////////////////////////////////////////
/// Construct the let according to the particle and cell distributions
///
using distrib_type = decltype(leaves_distrib);
std::vector<distrib_type> level_dist(localGroupTree->height());
level_dist[leaf_level] = cells_distrib;
build_let_at_level(manager, *localGroupTree, leafMortonIdx, particles_distrib, cells_distrib,
level_dist[leaf_level] = leaves_distrib;
///
/// Find and add the missing leaves at the leaf level
build_let_leaves(manager, *localGroupTree, leafMortonIdx, particles_distrib, leaves_distrib);
/// If the leaf and cell distributions differ, we have to redistribute the
/// morton indices according to the right distribution
/// Todo inria::dist::distribute()
//////////////////////////////////////////////////////////////////
/// Construct a uniform distribution of the morton index
///
// try
// {
// inria::mpi_config conf(manager.get_communicator());
// inria::distribute(conf, begin(leafMortonIdx), end(leafMortonIdx), leaves_distrib,
// inria::uniform_distribution{conf, leafMortonIdx});
// }
// catch(std::out_of_range& e)
// {
// std::cerr << e.what() << '\n';
// }
///
/// Find and add the missing cells at the leaf level
build_let_at_level(manager, *localGroupTree, leafMortonIdx, leaves_distrib, level_dist[leaf_level],
leaf_level);
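///
/// For the upper levels, derive each level's cell distribution from the one
/// below (build_upper_distribution), then extend the let at that level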
for(int l = leaf_level - 1; l >= localGroupTree->top_level(); --l)
{
std::cout << "level: " << l << " leaf_level " << leaf_level << "ttp" << localGroupTree->top_level()
@@ -1510,10 +1540,11 @@
level_dist[l] =
std::move(distrib::build_upper_distribution(manager, l, leafMortonIdx, level_dist[l + 1]));
out::print("rank(" + std::to_string(rank) + ") leafMortonIdx: ", leafMortonIdx);
build_let_at_level(manager, *localGroupTree, leafMortonIdx, particles_distrib, level_dist[l], l);
build_let_at_level(manager, *localGroupTree, leafMortonIdx, leaves_distrib, level_dist[l], l);
}
}
#ifdef SCALFMM_USE_MPI
std::cout << std::flush;
manager.get_communicator().barrier();
std::cout << scalfmm::colors::green << " --> End let::group_let() " << scalfmm::colors::reset << std::endl;
#endif
......
@@ -111,7 +111,7 @@ namespace scalfmm::component
/// This constructor creates a group tree from a particle container index.
/// The morton indices are computed and the particles are sorted in a first stage.
/// Then the leaf level is built.
/// Finally the other leve are proceed one after the other.
/// Finally the other levels are processed one after the other.
/// It should be easy to make it parallel using for and tasks.
/// If no limit is given, inLeftLimite = -1
/// \param tree_height
@@ -126,7 +126,7 @@ namespace scalfmm::component
template<typename ParticleContainer>
explicit group_tree(std::size_t tree_height, std::size_t order, box_type const& box,
std::size_t number_of_component_per_group, ParticleContainer const& particle_container,
bool particles_are_sorted = false, int in_left_limit = -1, int in_top_level = 2)
bool particles_are_sorted = false, int in_top_level = 2, int in_left_limit = -1)
: m_tree_height(tree_height)
, m_top_level(in_top_level)
, m_order(order)
@@ -410,17 +410,29 @@ namespace scalfmm::component
}
}
///
/// \brief insert component associated to morton index at level in the tree
/// \brief insert cells associated to morton index at level in the tree
///
///
/// \param level the level in the tree at which to add the cells
/// \param cells_index the morton indexes of the cells to add
///
template<typename VectorMortonIdx>
inline auto insert_component_at_level(const int& level, const VectorMortonIdx& component_morton_index)
inline auto insert_cells_at_level(const int& level, const VectorMortonIdx& cells_index)
{
std::cerr << " insert_component_at_level not yet implemented" << std::endl;
std::cerr << " insert_cells_at_level not yet implemented" << std::endl;
}
///
/// \brief insert leaves associated to morton index at level in the tree
///
///
/// \param leaves_index the morton indexes of the leaves to add
///
template<typename VectorMortonIdx>
inline auto insert_leaves(const VectorMortonIdx& leaves_index)
{
std::cerr << " insert_leaves not yet implemented" << std::endl;
}
///
/// \brief trace the index of the cells and leaves in the tree
......