solverstack / ScalFMM · Commits

Commit d6f8893f authored Mar 01, 2021 by COULAUD Olivier

test-build-let compile and run without MPI

parent bf451068
Changes 5
experimental/examples/CMakeLists.txt
...
...
@@ -20,11 +20,12 @@ set(source_tests_files
   # debug & check
   count_kernel.cpp
   test_l2p.cpp
+  test-build-let.cpp
   )
-if(SCALFMM_USE_MPI)
-  set(source_tests_files ${source_tests_files}
-      test-build-let.cpp)
-endif()
+# if(SCALFMM_USE_MPI)
+#   set(source_tests_files ${source_tests_files}
+#       test-build-let.cpp)
+# endif()
 # Add execs - 1 cpp = 1 exec
 foreach(exec ${source_tests_files})
...
...
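The source-level counterpart of this CMake change is a single translation unit that compiles and runs with or without MPI. A minimal standalone sketch of the pattern, assuming only the project's existing SCALFMM_USE_MPI flag (everything else below is illustrative, not ScalFMM code):

#include <iostream>

#ifdef SCALFMM_USE_MPI
#include <mpi.h>
#endif

int main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[])
{
    int rank = 0, nb_proc = 1;   // serial defaults when MPI is absent

#ifdef SCALFMM_USE_MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);
#endif

    std::cout << "rank " << rank << " of " << nb_proc << std::endl;

#ifdef SCALFMM_USE_MPI
    MPI_Finalize();
#endif
    return 0;
}

Built without -DSCALFMM_USE_MPI the program is purely serial; built with the flag and an MPI compiler wrapper, the same file runs under mpirun.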
experimental/examples/test-build-let.cpp
...
...
@@ -7,13 +7,22 @@
 #include "scalfmm/tools/fma_dist_loader.hpp"
 #include "scalfmm/tools/fma_loader.hpp"
 #include "scalfmm/tree/box.hpp"
+#include "scalfmm/matrix_kernels/laplace.hpp"
+#include "scalfmm/interpolation/uniform.hpp"
 #include "scalfmm/tree/cell.hpp"
 #include "scalfmm/tree/group_let.hpp"
 #include "scalfmm/tree/group_tree.hpp"
 #include "scalfmm/tree/leaf.hpp"
 #include "scalfmm/utils/parallel_manager.hpp"
+///
+/// \brief main
+/// \param argv
+/// \return
+///
+/// ./examples/RelWithDebInfo/test-build-let --input-file ../data/prolate.fma --order 3 --tree-height 4 --group-size 3
 auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
 {
...
...
@@ -29,14 +38,19 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
     using mortonIndex_type = std::size_t;
     using globalIndex_type = std::size_t;
+    using matrix_kernel_type = scalfmm::matrix_kernels::laplace::one_over_r;
+    using interpolator_type = scalfmm::interpolation::uniform_interpolator<double, dimension, matrix_kernel_type>;
     using particle_type = scalfmm::container::particle<value_type, dimension, value_type, nb_inputs_near, value_type,
                                                        nb_outputs_near /*, mortonIndex_type, globalIndex_type*/>;
     using read_particle_type = scalfmm::container::particle<value_type, dimension, value_type, nb_inputs_near,
                                                             value_type, 0, mortonIndex_type, globalIndex_type>;
     using container_type = scalfmm::container::particle_container<particle_type>;
     using position_type = typename particle_type::position_type;
-    using cell_type = scalfmm::component::cell<value_type, dimension, nb_inputs_far, nb_outputs_far, std::complex<value_type>>;
+    // using cell_type =
+    //     scalfmm::component::cell<value_type, dimension, nb_inputs_far, nb_outputs_far, std::complex<value_type>>;
+    using cell_type = scalfmm::component::cell<typename interpolator_type::storage_type>;
     using leaf_type = scalfmm::component::leaf<particle_type>;
     using box_type = scalfmm::component::box<position_type>;
     using group_tree_type = scalfmm::component::group_tree<cell_type, leaf_type, box_type>;
...
...
@@ -124,10 +138,10 @@ auto main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) -> int
     int leaf_level = tree_height - 1;
-    scalfmm::tree::let::buildLetTree(para, number_of_particles, particles_set, box, leaf_level, group_size,
+    scalfmm::tree::let::buildLetTree(para, number_of_particles, particles_set, box, leaf_level, group_size, group_size,
                                      letGroupTree, mortonCellDistribution, nb_block);
     // scalfmm::tree::print("rank(" + std::to_string(rank) + ") data distrib: ", mortonCellDistribution);
///
...
...
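The usage line in the new header comment passes four options. A hypothetical, minimal parser for that command line; ScalFMM's examples use their own option-parsing utilities, so every name below is illustrative only:

#include <cstdlib>
#include <iostream>
#include <string>

int main(int argc, char* argv[])
{
    std::string input_file{};
    int order = 3, tree_height = 4, group_size = 3;   // defaults match the sample run

    // options come in "--name value" pairs, as in the usage line above
    for(int i = 1; i + 1 < argc; i += 2)
    {
        const std::string opt{argv[i]};
        if(opt == "--input-file") { input_file = argv[i + 1]; }
        else if(opt == "--order") { order = std::atoi(argv[i + 1]); }
        else if(opt == "--tree-height") { tree_height = std::atoi(argv[i + 1]); }
        else if(opt == "--group-size") { group_size = std::atoi(argv[i + 1]); }
    }
    std::cout << input_file << " order=" << order << " height=" << tree_height
              << " group=" << group_size << std::endl;
    return 0;
}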
experimental/include/scalfmm/tools/fma_dist_loader.hpp
...
...
@@ -185,6 +185,7 @@ namespace scalfmm::tools
     std::cout << "centerOfBox " << centerOfBox << " boxWidth " << boxWidth << " nbParticles " << nbParticles
               << " dataType " << dataType << " nbDataPerRecord " << nbDataPerRecord << " dimension " << dimension
               << " nb_input_values " << nb_input_values << std::endl;
+#ifdef SCALFMM_USE_MPI
     for(auto a: typeFReal)
     {
         std::cout << "typeFReal " << a << std::endl;
...
...
@@ -221,12 +222,15 @@ namespace scalfmm::tools
         // Build the header offset
         std::cout << " headerSize " << m_headerSize << std::endl;
         FFmaGenericWriter<FReal>::close();
+#endif
     }
+#ifdef SCALFMM_USE_MPI
     auto comm = m_parallelManager->get_communicator();
     comm.bcast(&m_headerSize, 1, MPI_INT, 0);
     // MPI_Bcast(&_headerSize, 1, MPI_INT, 0, m_parallelManager->global().getComm());
     std::cout << " _headerSize " << m_headerSize << std::endl;
+#endif
     // MPI_File_close(&_mpiFile);
 }
...
...
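The bcast above goes through ScalFMM's parallel_manager wrapper. As a rough illustration of the same step in plain MPI (the header size 60 is a made-up value standing in for what rank 0 computes from the FMA header):

#include <cstdio>
#include <mpi.h>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int header_size = 0;
    if(rank == 0)
    {
        header_size = 60;   // only the reader rank knows the real offset
    }
    // after the broadcast every rank can seek past the header
    MPI_Bcast(&header_size, 1, MPI_INT, 0, MPI_COMM_WORLD);

    std::printf("rank %d headerSize %d\n", rank, header_size);
    MPI_Finalize();
    return 0;
}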
experimental/include/scalfmm/tree/group_let.hpp
...
...
@@ -6,9 +6,6 @@
 #include <string>
 #include <tuple>
-#include <inria/algorithm/distributed/sort.hpp>
-#include <inria/linear_tree/balance_tree.hpp>
-//#include <scalfmm/tree/group_linear_tree.hpp>
 #include <scalfmm/tree/utils.hpp>
 #include <scalfmm/utils/io_helpers.hpp>   // for out::print
...
...
@@ -16,6 +13,8 @@
 #ifdef SCALFMM_USE_MPI
 #include <inria/algorithm/distributed/distribute.hpp>
 #include <inria/algorithm/distributed/mpi.hpp>
+#include <inria/algorithm/distributed/sort.hpp>
+#include <inria/linear_tree/balance_tree.hpp>
 #include <mpi.h>
 #endif
...
...
@@ -61,7 +60,8 @@ namespace scalfmm::tree
 #endif
     return buff_recev;
 }
-template<typename data_type>
+#ifdef SCALFMM_USE_MPI
 ///
 /// \brief exchange_data_left_right exchanges data left and right between the left and right processors
 ///
...
...
@@ -73,11 +73,12 @@ namespace scalfmm::tree
 ///
 /// \return a tuple containing the value_right of the processor on the left and the
 /// value_left coming from the processor on the right
 ///
+template<typename data_type>
 auto exchange_data_left_right(inria::mpi_config& conf, data_type& data_left, data_type& data_right)
 {
     // Setting parameters
     data_type buff_p{0}, buff_n{0};
 #ifdef SCALFMM_USE_MPI
     auto comm = conf.comm;
     int nb_proc = comm.size();
     int my_rank = comm.rank();
...
...
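As a conceptual illustration of what exchange_data_left_right does, here is a left/right neighbour exchange written directly against MPI_Sendrecv instead of the inria wrapper used above. End ranks address MPI_PROC_NULL, which turns those transfers into no-ops; all names are illustrative:

#include <cstdio>
#include <mpi.h>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0, nb_proc = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);

    const int left = (rank > 0) ? rank - 1 : MPI_PROC_NULL;
    const int right = (rank + 1 < nb_proc) ? rank + 1 : MPI_PROC_NULL;

    int data_left = 10 * rank;        // value my left neighbour needs
    int data_right = 10 * rank + 1;   // value my right neighbour needs
    int buff_p = 0, buff_n = 0;       // received from left / from right

    MPI_Sendrecv(&data_left, 1, MPI_INT, left, 0, &buff_n, 1, MPI_INT, right, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Sendrecv(&data_right, 1, MPI_INT, right, 1, &buff_p, 1, MPI_INT, left, 1,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    std::printf("rank %d got %d from left, %d from right\n", rank, buff_p, buff_n);
    MPI_Finalize();
    return 0;
}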
@@ -103,10 +104,11 @@ namespace scalfmm::tree
         inria::mpi::request::waitall(2, tab_mpi_status);
         //////////////
 #endif
     }
     return std::make_tuple(buff_p, buff_n);
 }
 }   // namespace distrib
+#endif
 ///
 /// \brief balanced_leaves
 ///
...
...
@@ -793,13 +795,13 @@ namespace scalfmm::tree
             return m1 < m2;
         });
 #else
-    std::sort(particle_container.begin(), particle_container.end(),
-              [&box, &leaf_level](const auto& p1, const auto& p2)
-              {
-                  auto m1 = scalfmm::index::get_morton_index(p1.position(), box, leaf_level);
-                  auto m2 = scalfmm::index::get_morton_index(p2.position(), box, leaf_level);
-                  return m1 < m2;
-              });
+    std::sort(particle_container.begin(), particle_container.end(),
+              [&box, &leaf_level](const auto& p1, const auto& p2)
+              {
+                  auto m1 = scalfmm::index::get_morton_index(p1.position(), box, leaf_level);
+                  auto m2 = scalfmm::index::get_morton_index(p2.position(), box, leaf_level);
+                  return m1 < m2;
+              });
 #endif
     out::print("rank(" + std::to_string(rank) + ") partArray final: ", particle_container);
 ///
...
...
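The #else branch above is the new serial fallback: an ordinary std::sort by Morton index. A self-contained toy version of the same idea, with a hand-rolled 2-D bit interleaving standing in for scalfmm::index::get_morton_index:

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

// interleave the bits of (x, y) into one Z-order (Morton) key
static std::uint64_t morton_2d(std::uint32_t x, std::uint32_t y)
{
    std::uint64_t key = 0;
    for(int b = 0; b < 32; ++b)
    {
        key |= (std::uint64_t((x >> b) & 1u) << (2 * b)) |
               (std::uint64_t((y >> b) & 1u) << (2 * b + 1));
    }
    return key;
}

int main()
{
    std::vector<std::array<std::uint32_t, 2>> points{{3, 1}, {0, 0}, {2, 2}, {1, 3}};
    std::sort(points.begin(), points.end(),
              [](const auto& p1, const auto& p2)
              { return morton_2d(p1[0], p1[1]) < morton_2d(p2[0], p2[1]); });
    for(const auto& p : points)
    {
        std::cout << p[0] << " " << p[1] << std::endl;   // printed in Z-order
    }
    return 0;
}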
experimental/include/scalfmm/utils/parallel_manager.hpp
...
...
@@ -46,7 +46,7 @@ class parallel_manager
     parallel_manager()
       : m_number_processes(1)
-      , m_process_id(1)
+      , m_process_id(0)
       , m_number_threads(1)
     {
 #ifdef _OPENMP
...
...
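The constructor above also sets the thread count under #ifdef _OPENMP. A minimal sketch of that kind of guarded initialisation (illustrative, not the actual parallel_manager code):

#include <iostream>

#ifdef _OPENMP
#include <omp.h>
#endif

int main()
{
    int number_threads = 1;   // serial default, as in m_number_threads(1)
#ifdef _OPENMP
    number_threads = omp_get_max_threads();
#endif
    std::cout << "threads: " << number_threads << std::endl;
    return 0;
}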