Commit 436dcd58 authored by Olivier COULAUD

Start fma writer in MPI

parent 9619194c
Pipeline #230191 failed
@@ -4,7 +4,6 @@
#include "scalfmm/container/point.hpp"
#include <scalfmm/container/iterator.hpp>
//
#include "scalfmm/meta/traits.hpp"
//
#include "scalfmm/meta/type_pack.hpp"
//#include "scalfmm/meta/utils.hpp"
......
@@ -5,6 +5,7 @@
#include "parameters.hpp"
#include "scalfmm/container/particle.hpp"
#include "scalfmm/container/particle_container.hpp"
//#include "scalfmm/meta/traits.hpp"
#include "scalfmm/tools/colorized.hpp"
#include "scalfmm/tools/fma_dist_loader.hpp"
#include "scalfmm/tools/fma_loader.hpp"
@@ -66,8 +67,9 @@ namespace local_args
};
} // namespace local_args
template<int dimension>
auto run(parallel_manager& para, const std::string& input_file, const int tree_height, const int& part_group_size,
const int& leaf_group_size, const int order, bool use_leaf_distribution, bool use_particle_distribution) -> int
auto run(parallel_manager& para, const std::string& input_file, const std::string& output_file, const int tree_height,
const int& part_group_size, const int& leaf_group_size, const int order, bool use_leaf_distribution,
bool use_particle_distribution) -> int
{
constexpr int nb_inputs_near = 1;
constexpr int nb_outputs_near = 1;
@@ -162,13 +164,17 @@ auto run(parallel_manager& para, const std::string& input_file, const int tree_h
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// Save the data
// auto nbDataPerRecord = nb_val_to_red_per_part;
// scalfmm::tools::DistFmaGenericWriter<value_type> writer(output_file, para);
// std::cout << "number_of_particles " << number_of_particles << std::endl;
// writer.writeHeader(centre, width, number_of_particles, sizeof(value_type), nbDataPerRecord, dimension,
// loader.get_number_of_input_per_record());
const int nbDataPerRecord = scalfmm::container::particle_traits<particle_type>::number_of_elements;
const int inputs_size = scalfmm::container::particle_traits<particle_type>::inputs_size;
// static constexpr std::size_t nbDataPerRecord = particle_type::number_of_elements;
scalfmm::tools::DistFmaGenericWriter<value_type> writer(output_file, para);
/// Get the number of particles
std::cout << "number_of_particles " << number_of_particles << std::endl;
///
writer.writeHeader(centre, width, number_of_particles, sizeof(value_type), nbDataPerRecord, dimension, inputs_size);
///
/// writer.write( .... todo)
writer.writeFromTree(letGroupTree, number_of_particles);
///
///////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -230,7 +236,7 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
{
constexpr int dim = 2;
run<dim>(para, input_file, tree_height, group_size, group_size, order, use_leaf_distribution,
run<dim>(para, input_file, output_file, tree_height, group_size, group_size, order, use_leaf_distribution,
use_particle_distribution);
break;
}
@@ -238,7 +244,7 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
{
constexpr int dim = 3;
run<dim>(para, input_file, tree_height, group_size, group_size, order, use_leaf_distribution,
run<dim>(para, input_file, output_file, tree_height, group_size, group_size, order, use_leaf_distribution,
use_particle_distribution);
break;
}
......
@@ -8,6 +8,7 @@
#include <iostream>
#include <iterator>
//
#include "scalfmm/meta/traits.hpp"
#include <scalfmm/container/iterator.hpp>
#include <scalfmm/container/point.hpp>
#include <scalfmm/matrix_kernels/laplace.hpp>
......
@@ -238,123 +238,123 @@ namespace scalfmm::tools
{ /* MPI_File_close(&_mpiFile);*/
}
// /**
// * Write all for all particles the position, physical values, potential and forces
// *
// * @param myOctree the octree
// * @param nbParticlesnumber of particles
// * @param mortonLeafDistribution the morton distribution of the leaves (this is a vecor of size 2* the
// number of
// * MPI processes
// *
// */
// template<class OCTREECLASS>
// void writeDistributionOfParticlesFromGroupedOctree(OCTREECLASS& myOctree, const std::size_t&
// nbParticles,
// const std::vector<MortonIndex>&
// mortonLeafDistribution)
// {
// //
// // Write the header
// int sizeType = 0, ierr = 0;
// FReal tt = 0.0;
// MPI_Datatype mpistd::size_t_t = m_parallelManager->GetType(nbParticles);
// MPI_Datatype mpiFReal_t = m_parallelManager->GetType(tt);
// MPI_Type_size(mpiFReal_t, &sizeType);
// int myRank = m_parallelManager->global().processId();
// _headerSize = 0;
// //
// unsigned int typeFReal[2] = {sizeof(FReal), static_cast<unsigned int>(_nbDataTowritePerRecord)};
// if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, 0, &typeFReal, 2, MPI_INT, MPI_STATUS_IGNORE);
// }
// MPI_Type_size(MPI_INT, &sizeType);
// _headerSize += sizeType * 2;
// if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, _headerSize, &nbParticles, 1, mpistd::size_t_t,
// MPI_STATUS_IGNORE);
// }
// MPI_Type_size(mpistd::size_t_t, &sizeType);
// _headerSize += sizeType * 1;
// auto centerOfBox = myOctree.getBoxCenter();
// FReal boxSim[4] = {myOctree.getBoxWidth() * 0.5, centerOfBox.getX(), centerOfBox.getX(),
// centerOfBox.getX()};
/**
 * Write, for all particles, the position, physical values, potential and forces.
 *
 * @param myOctree the octree
 * @param nbParticles number of particles
 * @param mortonLeafDistribution the Morton distribution of the leaves (this is a vector of size
 *        2 x the number of MPI processes)
 *
 */
template<typename OCTREECLASS>
void writeFromTree(const OCTREECLASS& myOctree, const std::size_t& nbParticles)
{
// //
// // Write the header
// int sizeType = 0, ierr = 0;
// FReal tt = 0.0;
// MPI_Datatype mpistd::size_t_t = m_parallelManager->GetType(nbParticles);
// MPI_Datatype mpiFReal_t = m_parallelManager->GetType(tt);
// MPI_Type_size(mpiFReal_t, &sizeType);
// int myRank = m_parallelManager->global().processId();
// _headerSize = 0;
// //
// unsigned int typeFReal[2] = {sizeof(FReal), static_cast<unsigned
// int>(_nbDataTowritePerRecord)}; if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, 0, &typeFReal, 2, MPI_INT, MPI_STATUS_IGNORE);
// }
// MPI_Type_size(MPI_INT, &sizeType);
// _headerSize += sizeType * 2;
// if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, _headerSize, &nbParticles, 1, mpistd::size_t_t,
// MPI_STATUS_IGNORE);
// }
// MPI_Type_size(mpistd::size_t_t, &sizeType);
// _headerSize += sizeType * 1;
// auto centerOfBox = myOctree.getBoxCenter();
// FReal boxSim[4] = {myOctree.getBoxWidth() * 0.5, centerOfBox.getX(), centerOfBox.getX(),
// centerOfBox.getX()};
// if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, _headerSize, &boxSim[0], 4, mpiFReal_t, MPI_STATUS_IGNORE);
// }
// if(ierr > 0)
// {
// std::cerr << "Error during the construction of the header in "
// "FMpiFmaGenericWriter::writeDistributionOfParticlesFromOctree"
// << std::endl;
// }
// MPI_Type_size(mpiFReal_t, &sizeType);
// _headerSize += sizeType * 4;
// //
// // Construct the local number of particles on my process
// std::size_t nbLocalParticles = 0, maxPartLeaf = 0;
// MortonIndex starIndex = mortonLeafDistribution[2 * myRank],
// endIndex = mortonLeafDistribution[2 * myRank + 1];
// myOctree.template forEachCellLeaf<typename OCTREECLASS::LeafClass_T>(
// [&](typename OCTREECLASS::GroupSymbolCellClass_T* gsymb,
// typename OCTREECLASS::GroupCellUpClass_T* /* gmul */,
// typename OCTREECLASS::GroupCellDownClass_T* /* gloc */,
// typename OCTREECLASS::LeafClass_T* leafTarget) {
// if(!(gsymb->getMortonIndex() < starIndex || gsymb->getMortonIndex() > endIndex))
// {
// auto n = leafTarget->getNbParticles();
// nbLocalParticles += n;
// maxPartLeaf = std::max(maxPartLeaf, n);
// }
// });
// std::vector<FReal> particles(maxPartLeaf * _nbDataTowritePerRecord);
// // Build the offset for eaxh processes
// std::size_t before = 0; // Number of particles before me (rank < myrank)
// MPI_Scan(&nbLocalParticles, &before, 1, mpistd::size_t_t, MPI_SUM,
// m_parallelManager->global().getComm()); before -= nbLocalParticles; MPI_Offset offset =
// _headerSize + sizeType * _nbDataTowritePerRecord * before;
// //
// // Write particles in file
// myOctree.template forEachCellLeaf<typename OCTREECLASS::LeafClass_T>(
// [&](typename OCTREECLASS::GroupSymbolCellClass_T* gsymb,
// typename OCTREECLASS::GroupCellUpClass_T* /* gmul */,
// typename OCTREECLASS::GroupCellDownClass_T* /* gloc */,
// typename OCTREECLASS::LeafClass_T* leafTarget) {
// if(!(gsymb->getMortonIndex() < starIndex || gsymb->getMortonIndex() > endIndex))
// {
// const std::size_t nbPartsInLeaf = leafTarget->getNbParticles();
// const FReal* const posX = leafTarget->getPositions()[0];
// const FReal* const posY = leafTarget->getPositions()[1];
// const FReal* const posZ = leafTarget->getPositions()[2];
// const FReal* const physicalValues = leafTarget->getPhysicalValues();
// const FReal* const forceX = leafTarget->getForcesX();
// const FReal* const forceY = leafTarget->getForcesY();
// const FReal* const forceZ = leafTarget->getForcesZ();
// const FReal* const potential = leafTarget->getPotentials();
// for(int i = 0, k = 0; i < nbPartsInLeaf; ++i, k += _nbDataTowritePerRecord)
// {
// particles[k] = posX[i];
// particles[k + 1] = posY[i];
// particles[k + 2] = posZ[i];
// particles[k + 3] = physicalValues[i];
// particles[k + 4] = potential[i];
// particles[k + 5] = forceX[i];
// particles[k + 6] = forceY[i];
// particles[k + 7] = forceZ[i];
// }
// MPI_File_write_at(_mpiFile, offset, particles.data(),
// static_cast<int>(_nbDataTowritePerRecord * nbPartsInLeaf), mpiFReal_t,
// MPI_STATUS_IGNORE);
// offset += sizeType * _nbDataTowritePerRecord * nbPartsInLeaf;
// }
// });
// if(myRank == 0)
// {
// ierr = MPI_File_write_at(_mpiFile, _headerSize, &boxSim[0], 4, mpiFReal_t,
// MPI_STATUS_IGNORE);
// }
// if(ierr > 0)
// {
// std::cerr << "Error during the construction of the header in "
// "FMpiFmaGenericWriter::writeDistributionOfParticlesFromOctree"
// << std::endl;
// }
// MPI_Type_size(mpiFReal_t, &sizeType);
// _headerSize += sizeType * 4;
// //
// // Construct the local number of particles on my process
// std::size_t nbLocalParticles = 0, maxPartLeaf = 0;
// MortonIndex starIndex = mortonLeafDistribution[2 * myRank],
// endIndex = mortonLeafDistribution[2 * myRank + 1];
// myOctree.template forEachCellLeaf<typename OCTREECLASS::LeafClass_T>(
// [&](typename OCTREECLASS::GroupSymbolCellClass_T* gsymb,
// typename OCTREECLASS::GroupCellUpClass_T* /* gmul */,
// typename OCTREECLASS::GroupCellDownClass_T* /* gloc */,
// typename OCTREECLASS::LeafClass_T* leafTarget) {
// if(!(gsymb->getMortonIndex() < starIndex || gsymb->getMortonIndex() > endIndex))
// {
// auto n = leafTarget->getNbParticles();
// nbLocalParticles += n;
// maxPartLeaf = std::max(maxPartLeaf, n);
// }
// });
// std::vector<FReal> particles(maxPartLeaf * _nbDataTowritePerRecord);
// // Build the offset for eaxh processes
// std::size_t before = 0; // Number of particles before me (rank < myrank)
// MPI_Scan(&nbLocalParticles, &before, 1, mpistd::size_t_t, MPI_SUM,
// m_parallelManager->global().getComm()); before -= nbLocalParticles; MPI_Offset offset =
// _headerSize + sizeType * _nbDataTowritePerRecord * before;
// //
// // Write particles in file
// myOctree.template forEachCellLeaf<typename OCTREECLASS::LeafClass_T>(
// [&](typename OCTREECLASS::GroupSymbolCellClass_T* gsymb,
// typename OCTREECLASS::GroupCellUpClass_T* /* gmul */,
// typename OCTREECLASS::GroupCellDownClass_T* /* gloc */,
// typename OCTREECLASS::LeafClass_T* leafTarget) {
// if(!(gsymb->getMortonIndex() < starIndex || gsymb->getMortonIndex() > endIndex))
// {
// const std::size_t nbPartsInLeaf = leafTarget->getNbParticles();
// const FReal* const posX = leafTarget->getPositions()[0];
// const FReal* const posY = leafTarget->getPositions()[1];
// const FReal* const posZ = leafTarget->getPositions()[2];
// const FReal* const physicalValues = leafTarget->getPhysicalValues();
// const FReal* const forceX = leafTarget->getForcesX();
// const FReal* const forceY = leafTarget->getForcesY();
// const FReal* const forceZ = leafTarget->getForcesZ();
// const FReal* const potential = leafTarget->getPotentials();
// for(int i = 0, k = 0; i < nbPartsInLeaf; ++i, k += _nbDataTowritePerRecord)
// {
// particles[k] = posX[i];
// particles[k + 1] = posY[i];
// particles[k + 2] = posZ[i];
// particles[k + 3] = physicalValues[i];
// particles[k + 4] = potential[i];
// particles[k + 5] = forceX[i];
// particles[k + 6] = forceY[i];
// particles[k + 7] = forceZ[i];
// }
// MPI_File_write_at(_mpiFile, offset, particles.data(),
// static_cast<int>(_nbDataTowritePerRecord * nbPartsInLeaf),
// mpiFReal_t, MPI_STATUS_IGNORE);
// offset += sizeType * _nbDataTowritePerRecord * nbPartsInLeaf;
// }
// });
// MPI_File_close(&_mpiFile);
// }
#ifdef SCALFMM_USE_MPI
MPI_File_close(&_mpiFile);
#endif
}
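
The new writeFromTree above still carries its body as comments and, for now, only closes the MPI file. The pattern the commented-out code sketches is: rank 0 writes the header, every rank counts its local particles, an exclusive prefix sum (MPI_Scan minus the local count) gives the number of records written by the lower ranks, and that count is turned into a byte offset for MPI_File_write_at. Below is a minimal, self-contained illustration of that offset logic with plain MPI calls; the record layout, header size and file name are assumptions made for the example, not scalfmm's API.

#include <mpi.h>

#include <cstdint>
#include <vector>

// Toy illustration of the offset computation used for a parallel FMA-style
// write: each rank appends its records after those of the lower ranks.
int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank{0};
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Assumed record layout: x, y, z, physical value, potential, fx, fy, fz.
    const std::uint64_t nb_data_per_record = 8;
    // Assumed header size in bytes (precision tag, record size, particle
    // count, box width and centre) -- purely illustrative.
    const std::uint64_t header_size = 2 * sizeof(int) + sizeof(std::uint64_t) + 4 * sizeof(double);

    // Pretend each rank owns a different number of local particles.
    const std::uint64_t nb_local = 100 + 10 * static_cast<std::uint64_t>(rank);

    // Inclusive prefix sum, then subtract my contribution to get the number
    // of particles written by ranks strictly before me.
    std::uint64_t before = 0;
    MPI_Scan(&nb_local, &before, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
    before -= nb_local;

    // Byte offset where this rank starts writing its records.
    const MPI_Offset offset =
        static_cast<MPI_Offset>(header_size + sizeof(double) * nb_data_per_record * before);

    // Fill my records with dummy data and write them at my offset
    // ("particles.bfma" is a placeholder output name).
    std::vector<double> records(nb_local * nb_data_per_record, static_cast<double>(rank));
    MPI_File file;
    MPI_File_open(MPI_COMM_WORLD, "particles.bfma", MPI_MODE_CREATE | MPI_MODE_WRONLY,
                  MPI_INFO_NULL, &file);
    MPI_File_write_at(file, offset, records.data(), static_cast<int>(records.size()),
                      MPI_DOUBLE, MPI_STATUS_IGNORE);
    MPI_File_close(&file);

    MPI_Finalize();
    return 0;
}
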
// /**
// * Write all for all particles the position, physical values, potential and forces
......
@@ -1117,13 +1117,14 @@ namespace scalfmm::tree
return i;
}
}
return -1;
}
///
/// \brief find the process owning the index, if it exists
/// \param[in] index
/// \param[in] distrib the index distribution
/// \param[in] start [optional] position to start in the distribution
/// vector \return the process number
/// vector \return the process number (or -1 if the index is not in my distribution)
///
template<typename MortonIdx, typename VetorMortonIdx>
inline std::int64_t find_index(const MortonIdx& index, const VetorMortonIdx& my_index, std::size_t& start)
@@ -1146,6 +1147,7 @@ namespace scalfmm::tree
return -1;
}
}
return -1;
}
///
......
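
The two return -1; statements added above close a gap where the search could fall off the end of the function without returning a value. As a reference for the intended semantics, here is a small free-standing sketch; the distribution layout it assumes (per process, the first and last owned Morton index stored as a pair) is an assumption for illustration, not the library's exact code.

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed layout: for each process p, distrib[2 * p] and distrib[2 * p + 1]
// hold the first and last Morton index it owns. Returns the owning process,
// or -1 when the index lies in no interval (mirroring the added `return -1;`).
template<typename MortonIdx>
inline std::int64_t find_owner(const MortonIdx& index, const std::vector<MortonIdx>& distrib,
                               std::size_t start = 0)
{
    const std::size_t nb_proc = distrib.size() / 2;
    for(std::size_t p = start; p < nb_proc; ++p)
    {
        if(index >= distrib[2 * p] && index <= distrib[2 * p + 1])
        {
            return static_cast<std::int64_t>(p);
        }
    }
    return -1;   // index not covered by any process interval
}

int main()
{
    // Two processes: ranks 0 and 1 own [0, 7] and [8, 15] respectively.
    const std::vector<std::int64_t> distrib{0, 7, 8, 15};
    return find_owner<std::int64_t>(12, distrib) == 1 ? 0 : 1;
}
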