Commit 47ad9e4f authored by COULAUD Olivier's avatar COULAUD Olivier

LET construction is nearly finished. We just have to insert the new cells into the tree.

parent 8a35e7e8
#include <array>
#include <chrono>
#include <thread>
#include "parameters.hpp"
#include "scalfmm/container/particle.hpp"
@@ -8,8 +10,8 @@
#include "scalfmm/tools/fma_loader.hpp"
#include "scalfmm/tree/box.hpp"
#include "scalfmm/matrix_kernels/laplace.hpp"
#include "scalfmm/interpolation/uniform.hpp"
#include "scalfmm/matrix_kernels/laplace.hpp"
#include "scalfmm/tree/cell.hpp"
#include "scalfmm/tree/group_let.hpp"
@@ -24,12 +26,13 @@
///
///
/// ./examples/RelWithDebInfo/test-build-let --input-file ../data/prolate.fma --order 3 --tree-height 4 --group-size 3
/// mpirun --oversubscribe -np 3 ./examples/RelWithDebInfo/test-build-let --input-file ../buildMPI/prolate.fma --order
/// 3 --tree-height 3 --group-size 3
auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
{
parallel_manager para;
para.init();
#ifndef LET
constexpr int dimension = 2;
constexpr int nb_inputs_near = 1;
@@ -45,13 +48,13 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
using interpolator_type = scalfmm::interpolation::uniform_interpolator<double, dimension, matrix_kernel_type>;
using particle_type = scalfmm::container::particle<value_type, dimension, value_type, nb_inputs_near, value_type,
nb_outputs_near /*, mortonIndex_type, globalIndex_type*/>;
using read_particle_type = scalfmm::container::particle<value_type, dimension, value_type, nb_inputs_near,
value_type, 0, mortonIndex_type, globalIndex_type>;
using container_type = scalfmm::container::particle_container<particle_type>;
using position_type = typename particle_type::position_type;
// using cell_type =
// scalfmm::component::cell<value_type, dimension, nb_inputs_far, nb_outputs_far, std::complex<value_type>>;
using cell_type = scalfmm::component::cell<typename interpolator_type::storage_type>;
using leaf_type = scalfmm::component::leaf<particle_type>;
using box_type = scalfmm::component::box<position_type>;
@@ -86,7 +89,6 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
//
//
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// Read the data in parallel
///
@@ -103,7 +105,7 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
box_type box(width, centre);
//
container_type container(local_number_of_particles);
std::vector<particle_type> particles_set(local_number_of_particles);
for(std::size_t idx = 0; idx < local_number_of_particles; ++idx)
{
loader.fillParticle(values_to_read, nb_val_to_red_per_part);
@@ -113,9 +115,9 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
{
e = values_to_read[ii++];
}
// p.variables()
particles_set[idx] = p;
// container.push_particle(idx, p);
}
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -135,17 +137,19 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
group_tree_type* letGroupTree = nullptr;
//
int rank = para.get_process_id();
int leaf_level = tree_height - 1;
scalfmm::tree::let::buildLetTree(para, number_of_particles, particles_set, box, leaf_level, group_size, group_size,
letGroupTree, mortonCellDistribution, nb_block, order);
// scalfmm::tree::print("rank(" + std::to_string(rank) + ") data distrib:
// ", mortonCellDistribution);
#ifdef SCALFMM_USE_MPI
para.get_communicator().barrier();
std::this_thread::sleep_for(std::chrono::seconds(5));
#endif
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
///
@@ -156,82 +160,91 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
// std::cout << "number_of_particles " << number_of_particles << std::endl;
// writer.writeHeader(centre, width, number_of_particles, sizeof(value_type), nbDataPerRecord, dimension,
// loader.get_number_of_input_per_record());
///
/// writer.write( .... todo)
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
#else
// MPI communicator
// auto world = inria::mpi::communicator::world();
inria::mpi_config conf(para.get_communicator());
const auto rank = conf.comm.rank();
const int N = 30;
// Input is an empty vector, no allocation is done
std::vector<int> input{};
int block = rank == para.get_num_processes() - 1 ? N - int(N / 3) * rank : int(N / 3);
input.resize(block);
std::cout << "block: " << block << std::endl;
std::iota(begin(input), end(input), block * rank);
std::cout << " proc " << conf.comm.rank() << " in (" << input.size() << "): ";
for(auto& v: input)
{
std::cout << v << " ";
}
std::cout << std::endl;
std::array<int, 3> proportions{100, 200, 300};
int total = std::accumulate(proportions.begin(), proportions.end(), 0);
int N_local = N;
// int(N * proportions[rank] / total + 1);
std::cout <<" proc "<< rank<< " out (" << output.size() << "): ";
for (auto& v : output) {
std::cout << v << " ";
}
std::cout << std::endl;
// All processes have a large enough buffer
std::vector<int> output;
output.resize(N_local);
int none_value = 0;
try
{
std::array<int, 3> proportions{100, 200, 300};
// Distribution object: value i goes to the first rank k with i <= mapping[k],
// so with mapping {15, 20, 30} rank 0 receives the values <= 15, rank 1 the
// values in (15, 20] and rank 2 the values in (20, 30].
struct dist
{
std::vector<int> mapping{15, 20, 30};
int operator()(const int& i)
{
for(std::size_t k = 0; k < mapping.size(); ++k)
{
if(i <= mapping[k])
{
return static_cast<int>(k);
}
}
// Fallback so that every value maps to a valid rank.
return static_cast<int>(mapping.size()) - 1;
}
};
inria::proportional_distribution<> distrib(conf, input, proportions);
// inria::distribute(conf, input, output, distrib);
inria::distribute(conf, input, output, dist{});
// inria::distribute(conf, begin(input), end(input), begin(output),
// end(output), distrib);
}
catch(std::out_of_range& e)
{
std::cerr << e.what() << '\n';
}
std::cout << " output.size() " << output.size() << std::endl;
// Determine the number of values actually received: scan for the first
// none_value marker and truncate the output there.
for(std::size_t i = 1; i < output.size(); ++i)
{
N_local = static_cast<int>(i);
if(output[i] == none_value)
{
break;
}
}
output.resize(N_local);
std::cout << " proc " << rank << " out (" << output.size() << "): ";
for(auto& v: output)
{
std::cout << v << " ";
}
std::cout << std::endl;
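// Expected outcome (a sketch, not verified output): with N = 30 on 3
// processes, each rank initially holds 10 consecutive values (0..9,
// 10..19, 20..29); after inria::distribute with dist{}, rank 0 should
// hold the values <= 15, rank 1 those in (15, 20] and rank 2 those in
// (20, 30].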
#endif
delete letGroupTree;
para.end();
return 0;
}
@@ -59,6 +59,7 @@ namespace scalfmm::component
private:
const std::size_t m_tree_height{};
const std::size_t m_top_level{}; ///< the level at which the FMM algorithm stops (generally 2)
const std::size_t m_order{};
const std::size_t m_number_of_component_per_group{};
cell_group_vector_type m_group_of_cell_per_level;
@@ -68,6 +69,7 @@ namespace scalfmm::component
public:
[[nodiscard]] auto box_center() const { return m_box.center(); }
[[nodiscard]] inline auto leaf_level() const noexcept -> std::size_t { return m_tree_height - 1; }
[[nodiscard]] inline auto top_level() const noexcept -> std::size_t { return m_top_level; }
[[nodiscard]] auto box_width(std::size_t dimension = 0) const { return m_box.width(dimension); }
[[nodiscard]] auto leaf_width(std::size_t dimension = 0) const
@@ -103,19 +105,30 @@ namespace scalfmm::component
// }
//
///
/// \brief group_tree
///
/// This constructor creates a group tree from a particle container.
/// The Morton indices are computed and the particles are sorted in a first stage.
/// Then the leaf level is built.
/// Finally, the other levels are processed one after the other.
/// It should be easy to make this parallel using for loops and tasks.
/// If no limit is given, in_left_limit = -1.
/// \param tree_height
/// \param order
/// \param box
/// \param number_of_component_per_group
/// \param particle_container
/// \param particles_are_sorted
/// \param in_left_limit
/// \param in_top_level the root level of the tree used in the FMM (2 except for a periodic box)
///
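/// A minimal usage sketch (the group_tree_type alias, container and
/// group_size are assumed to be defined by the caller, as in the example
/// program above):
/// \code
/// group_tree_type tree(tree_height, order, box, group_size, container,
///                      /*particles_are_sorted*/ false, /*in_left_limit*/ -1,
///                      /*in_top_level*/ 2);
/// \endcode
///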
template<typename ParticleContainer>
explicit group_tree(std::size_t tree_height, std::size_t order, box_type const& box,
std::size_t number_of_component_per_group, ParticleContainer const& particle_container,
bool particles_are_sorted = false, int in_left_limit = -1, int in_top_level = 2)
: m_tree_height(tree_height)
, m_top_level(in_top_level)
, m_order(order)
, m_number_of_component_per_group(number_of_component_per_group)
, m_group_of_cell_per_level(tree_height)
@@ -500,8 +513,8 @@ namespace scalfmm::component
std::cout << " index: \n";
component::for_each(std::begin(*ptr_group), std::end(*ptr_group), [](auto& leaf) {
auto& leaf_symbolics = leaf.symbolics();
std::cout << " " << leaf.index() << " p2p_list (" << leaf_symbolics.number_of_neighbors
<< "): ";
std::cout << " " << leaf.index() << " p2p_list ("
<< leaf_symbolics.existing_neighbors_in_group << "): ";
// get the p2p interaction list
// auto index = leaf_symbolics.interaction_indexes;
auto index = leaf_symbolics.interaction_iterators;