
Commit f073b73a authored by COULAUD Olivier

test-compose data decomposition is OK

parent 8b27a368
@@ -206,7 +206,12 @@ auto run(inria::tcli::parser<Parameters...> const& parser, parallel_manager& par
<< scalfmm::colors::reset << '\n';
}
////
/// \brief build_distrib
/// Builds the distribution of cells per processor.
/// index_dist[k] = index of the first particle inside cell k owned by the processor;
/// the particles inside cell k start at index_dist[k]
/// and their number is index_dist[k+1] - index_dist[k].
/// Thanks to this we are able to construct the local block matrix.
///
auto level = tree_height - 1;
std::vector<int> index_dist;
build_distrib(para, container, number_of_particles, box, level, index_dist);
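// Illustrative sketch (not part of this commit): consuming the distribution
// built above. Cell k owns the particles in [index_dist[k], index_dist[k+1]),
// so a per-cell walk over the local particles can be written as below;
// the loop body is an assumption, for illustration only.
for(std::size_t k = 0; k + 1 < index_dist.size(); ++k)
{
    auto first = index_dist[k];             // first particle of cell k
    auto count = index_dist[k + 1] - first; // number of particles in cell k
    // ... operate on the `count` particles of cell k starting at `first` ...
}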
@@ -214,10 +219,11 @@ auto run(inria::tcli::parser<Parameters...> const& parser, parallel_manager& par
auto rank = para.get_process_id();
scalfmm::out::print("rank(" + std::to_string(rank) + ") block distribution: ", index_dist);
/// Construct the block matrix
build_matrix();
////
para.end();
return 0;
}
@@ -254,4 +260,6 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
<< " 3 for dimension 3 " << std::endl;
break;
}
para.end();
}
@@ -64,33 +64,81 @@ class parallel_manager
omp_set_num_threads(num_threads);
#endif
}
#ifdef SCALFMM_USE_MPI
///
/// \brief Constructor with an MPI communicator
///
/// To avoid deadlocks, the communicator HAS TO BE duplicated.
///
/// \param in_communicator MPI communicator
///
parallel_manager(const int num_threads, MPI_Comm in_communicator)
: m_number_threads(num_threads) {
this->init(in_communicator);
#ifdef _OPENMP
omp_set_num_threads(num_threads);
#endif
}
#endif
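/// Illustrative usage sketch (not part of this commit), showing the
/// duplication requirement from the caller's side; MPI_Comm_dup is the
/// standard MPI call for this, and `num_threads` is a placeholder value:
///
///   MPI_Comm dup_comm;
///   MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
///   parallel_manager manager(num_threads, dup_comm);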
//////////
void init()
{
#ifdef SCALFMM_USE_MPI
int provided, err;
// err = MPI_Init(nullptr, nullptr);
err = MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided);
m_communicator = inria::mpi::communicator::world();
m_number_processes = m_communicator.size();
m_process_id = m_communicator.rank();
if(io_master())
{
std::cout << "MPI_Init_thread err= " << err << " provided= " << provided << std::endl;
std::cout << "init mpi: " << m_communicator << std::endl;
}
this->init(inria::mpi::communicator::world());
#endif
}
#ifdef SCALFMM_USE_MPI
///
/// \brief Initialize the communication level
///
/// To avoid deadlocks, the communicator HAS TO BE duplicated.
///
/// \param in_communicator MPI communicator
///
void init(MPI_Comm in_communicator)
{
int provided;
int error = MPI_Query_thread(&provided);
if (provided < MPI_THREAD_SERIALIZED) {
std::cerr
<< " MPI should be initialized at least at the MPI_THREAD_SERIALIZED level"
<< std::endl;
MPI_Finalize();
std::exit(EXIT_FAILURE);
}
m_communicator = inria::mpi::communicator(in_communicator);
m_number_processes = m_communicator.size();
m_process_id = m_communicator.rank();
}
#endif
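/// Illustrative usage sketch (not part of this commit): MPI must already be
/// initialized at (at least) MPI_THREAD_SERIALIZED before this overload is
/// called, and the communicator passed in should be a duplicate. The
/// thread-count-only constructor used here is an assumption based on the
/// constructor fragment above:
///
///   int provided = 0;
///   MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided);
///   MPI_Comm dup_comm;
///   MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
///   parallel_manager manager(num_threads);
///   manager.init(dup_comm);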
#ifdef SCALFMM_USE_MPI
///
/// \brief Initialize the communication level
///
/// To avoid deadlocks, the communicator HAS TO BE duplicated.
///
/// \param in_communicator MPI communicator object
///
void init(inria::mpi::communicator& in_communicator)
{
int provided;
int error = MPI_Query_thread(&provided);
if (provided < MPI_THREAD_SERIALIZED) {
std::cerr
<< " MPI should be initialized at least at the MPI_THREAD_SERIALIZED level"
<< std::endl;
MPI_Finalize();
std::exit(EXIT_FAILURE);
}
m_communicator = in_communicator;
m_number_processes = m_communicator.size();
m_process_id = m_communicator.rank();
}
#endif
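/// Illustrative usage sketch (not part of this commit) for the overload
/// above, wrapping an already duplicated raw MPI communicator in the
/// inria::mpi::communicator type used by this class; `manager` stands for
/// an already constructed parallel_manager:
///
///   MPI_Comm dup_comm;
///   MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
///   inria::mpi::communicator comm(dup_comm);
///   manager.init(comm);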
void end()
{
......