Commit 69d0f197 authored by Nathalie Furmento's avatar Nathalie Furmento
Browse files

website: update tutorials/2014-05-PATC

git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/starpu/website@12899 176f6dd6-97d6-42f4-bd05-d3db9ad07c7a
parent 1b5be09d
CFLAGS += $(shell pkg-config --cflags starpu-1.1)
LDFLAGS += $(shell pkg-config --libs starpu-1.1)

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

# Compile CUDA sources with nvcc.  The output object must be named with
# "-o $@" (the previous recipe ended with a bare "$", which expands to
# nothing, so the object landed wherever nvcc's default put it).
%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@

vector_scal_task_insert: vector_scal_task_insert.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

mult: mult.c

clean:
	rm -f vector_scal *.o
	rm -f vector_scal_task_insert mult *.o
/* StarPU --- Runtime system for heterogeneous multicore architectures.
*
* Copyright (C) 2010-2011, 2013 Université de Bordeaux 1
* Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
* Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
*
* StarPU is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or (at
* your option) any later version.
*
* StarPU is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU Lesser General Public License in COPYING.LGPL for more details.
*/
/*
* This example shows a simple implementation of a blocked matrix
* multiplication. Note that this is NOT intended to be an efficient
* implementation of sgemm! In this example, we show:
* - how to declare dense matrices (starpu_matrix_data_register)
* - how to manipulate matrices within codelets (eg. descr[0].blas.ld)
* - how to use filters to partition the matrices into blocks
* (starpu_data_partition and starpu_data_map_filters)
* - how to unpartition data (starpu_data_unpartition) and how to stop
* monitoring data (starpu_data_unregister)
* - how to manipulate subsets of data (starpu_data_get_sub_data)
* - how to construct an autocalibrated performance model (starpu_perfmodel)
* - how to submit asynchronous tasks
*/
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>

#include <starpu.h>
/* Full matrices in main memory (column-major / FORTRAN layout). */
static float *A, *B, *C;
/* StarPU handles monitoring the three matrices once registered. */
static starpu_data_handle_t A_handle, B_handle, C_handle;
/* Number of vertical (x) and horizontal (y) slices the matrices are cut into. */
static unsigned nslicesx = 4;
static unsigned nslicesy = 4;
/* Problem dimensions: A is (z columns of y), B is (x columns of z),
 * C is (x columns of y) -- see the ASCII diagram below. */
static unsigned xdim = 1024;
static unsigned ydim = 1024;
static unsigned zdim = 512;
/*
* That program should compute C = A * B
*
* A of size (z,y)
* B of size (x,z)
* C of size (x,y)
|---------------|
z | B |
|---------------|
z x
|----| |---------------|
| | | |
| | | |
| A | y | C |
| | | |
| | | |
|----| |---------------|
*/
/*
* The codelet is passed 3 matrices, the "descr" union-type field gives a
* description of the layout of those 3 matrices in the local memory (ie. RAM
* in the case of CPU, GPU frame buffer in the case of GPU etc.). Since we have
* registered data with the "matrix" data interface, we use the matrix macros.
*/
/* CPU kernel: multiply the two read-only sub-matrices described by
 * descr[0] (block of A) and descr[1] (block of B) and write the product
 * into descr[2] (block of C).  The "descr" array describes the layout of
 * the local copies (RAM for a CPU worker).  Since the data was registered
 * with the "matrix" interface, the matrix accessor macros apply.
 * FORTRAN (column-major) ordering is assumed. */
void cpu_mult(void *descr[], STARPU_ATTRIBUTE_UNUSED void *arg)
{
	/* Pointers to the first element of each local copy. */
	float *blockA = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
	float *blockB = (float *)STARPU_MATRIX_GET_PTR(descr[1]);
	float *blockC = (float *)STARPU_MATRIX_GET_PTR(descr[2]);

	/* NX is the number of contiguous elements per column, NY the number
	 * of columns, LD the leading dimension.  NB: when filters were
	 * applied, the leading dimension on the worker is not guaranteed to
	 * match the one of the original matrix in main memory. */
	uint32_t widthC  = STARPU_MATRIX_GET_NX(descr[2]);
	uint32_t heightC = STARPU_MATRIX_GET_NY(descr[2]);
	uint32_t heightA = STARPU_MATRIX_GET_NY(descr[0]);
	uint32_t strideA = STARPU_MATRIX_GET_LD(descr[0]);
	uint32_t strideB = STARPU_MATRIX_GET_LD(descr[1]);
	uint32_t strideC = STARPU_MATRIX_GET_LD(descr[2]);

	unsigned col, row, inner;
	for (col = 0; col < heightC; col++)
	{
		for (row = 0; row < widthC; row++)
		{
			float acc = 0.0f;
			for (inner = 0; inner < heightA; inner++)
				acc += blockA[row + inner*strideA] * blockB[inner + col*strideB];
			blockC[row + col*strideC] = acc;
		}
	}
}
/* Allocate the A, B and C matrices and initialize them: A and B are
 * filled with pseudo-random values (fixed seed for reproducibility),
 * C is cleared.  Column-major layout: element (row, col) of an m-row
 * matrix lives at [row + col*m].
 * Exits with an error message on allocation failure (the original code
 * dereferenced the unchecked malloc results). */
static void init_problem_data(void)
{
	unsigned i, j;

	A = (float *) malloc(zdim*ydim*sizeof(float));
	B = (float *) malloc(xdim*zdim*sizeof(float));
	C = (float *) malloc(xdim*ydim*sizeof(float));
	if (!A || !B || !C)
	{
		fprintf(stderr, "init_problem_data: memory allocation failed\n");
		exit(EXIT_FAILURE);
	}

	/* fill the A and B matrices */
	srand(2009);
	for (j = 0; j < ydim; j++)
	{
		for (i = 0; i < zdim; i++)
		{
			A[j + i*ydim] = (float)(starpu_drand48());
		}
	}
	for (j = 0; j < zdim; j++)
	{
		for (i = 0; i < xdim; i++)
		{
			B[j + i*zdim] = (float)(starpu_drand48());
		}
	}
	for (j = 0; j < ydim; j++)
	{
		for (i = 0; i < xdim; i++)
		{
			C[j + i*ydim] = 0.0f;
		}
	}
}
static void partition_mult_data(void)
{
/* note that we assume a FORTRAN ordering here! */
/* The BLAS data interface is described by 4 parameters:
* - the location of the first element of the matrix to monitor (3rd
* argument)
* - the number of elements between columns, aka leading dimension
* (4th arg)
* - the number of (contiguous) elements per column, ie. contiguous
* elements (5th arg)
* - the number of columns (6th arg)
* The first elements is a pointer to the data_handle that will be
* associated to the matrix, and the second elements gives the memory
* node in which resides the matrix: 0 means that the 3rd argument is
* an adress in main memory.
*/
starpu_matrix_data_register(&A_handle, STARPU_MAIN_RAM, (uintptr_t)A,
ydim, ydim, zdim, sizeof(float));
starpu_matrix_data_register(&B_handle, STARPU_MAIN_RAM, (uintptr_t)B,
zdim, zdim, xdim, sizeof(float));
starpu_matrix_data_register(&C_handle, STARPU_MAIN_RAM, (uintptr_t)C,
ydim, ydim, xdim, sizeof(float));
/* A filter is a method to partition a data into disjoint chunks, it is
* described by the means of the "struct starpu_data_filter" structure that
* contains a function that is applied on a data handle to partition it
* into smaller chunks, and an argument that is passed to the function
* (eg. the number of blocks to create here).
*/
/* StarPU supplies some basic filters such as the partition of a matrix
* into blocks, note that we are using a FORTRAN ordering so that the
* name of the filters are a bit misleading */
struct starpu_data_filter vert =
{
.filter_func = starpu_matrix_filter_vertical_block,
.nchildren = nslicesx
};
struct starpu_data_filter horiz =
{
.filter_func = starpu_matrix_filter_block,
.nchildren = nslicesy
};
/*
* Illustration with nslicex = 4 and nslicey = 2, it is possible to access
* sub-data by using the "starpu_data_get_sub_data" method, which takes a data handle,
* the number of filters to apply, and the indexes for each filters, for
* instance:
*
* A' handle is starpu_data_get_sub_data(A_handle, 1, 1);
* B' handle is starpu_data_get_sub_data(B_handle, 1, 2);
* C' handle is starpu_data_get_sub_data(C_handle, 2, 2, 1);
*
* Note that here we applied 2 filters recursively onto C.
*
* "starpu_data_get_sub_data(C_handle, 1, 3)" would return a handle to the 4th column
* of blocked matrix C for example.
*
* |---|---|---|---|
* | | | B'| | B
* |---|---|---|---|
* 0 1 2 3
* |----| |---|---|---|---|
* | | | | | | |
* | | 0 | | | | |
* |----| |---|---|---|---|
* | A' | | | | C'| |
* | | | | | | |
* |----| |---|---|---|---|
* A C
*
* IMPORTANT: applying filters is equivalent to partitionning a piece of
* data in a hierarchical manner, so that memory consistency is enforced
* for each of the elements independantly. The tasks should therefore NOT
* access inner nodes (eg. one column of C or the whole C) but only the
* leafs of the tree (ie. blocks here). Manipulating inner nodes is only
* possible by disapplying the filters (using starpu_data_unpartition), to
* enforce memory consistency.
*/
starpu_data_partition(B_handle, &vert);
starpu_data_partition(A_handle, &horiz);
/* starpu_data_map_filters is a variable-arity function, the first argument
* is the handle of the data to partition, the second argument is the
* number of filters to apply recursively. Filters are applied in the
* same order as the arguments.
* This would be equivalent to starpu_data_partition(C_handle, &vert) and
* then applying horiz on each sub-data (ie. each column of C)
*/
starpu_data_map_filters(C_handle, 2, &vert, &horiz);
}
/* Auto-calibrating, history-based performance model: StarPU records the
 * measured execution times of tasks using this model (stored under the
 * "mult_perf_model" symbol) so that performance-aware scheduling
 * policies can use them. */
static struct starpu_perfmodel mult_perf_model =
{
.type = STARPU_HISTORY_BASED,
.symbol = "mult_perf_model"
};
/* Codelet describing the multiplication kernel: its available
 * implementations, the number and access modes of the data buffers it
 * manipulates, and the performance model to feed. */
static struct starpu_codelet cl =
{
/* we can only execute that kernel on a CPU yet */
/* CPU implementation of the codelet */
.cpu_funcs = {cpu_mult, NULL},
/* string name of the CPU implementation
 * (NOTE(review): presumably used for tracing/simulation -- confirm) */
.cpu_funcs_name = {"cpu_mult", NULL},
/* the codelet manipulates 3 buffers that are managed by the
 * DSM */
.nbuffers = 3,
/* A and B are only read; C is only written to, so its previous
 * content need not be transferred to the worker */
.modes = {STARPU_R, STARPU_R, STARPU_W},
/* in case the scheduling policy may use performance models */
.model = &mult_perf_model
};
/* Create and submit one asynchronous task per block of C:
 * C[bx, by] = A[by] * B[bx].
 * Returns -ENODEV if no worker can execute the codelet, 0 otherwise. */
static int launch_tasks(void)
{
	unsigned bx, by;

	for (bx = 0; bx < nslicesx; bx++)
	{
		for (by = 0; by < nslicesy; by++)
		{
			/* starpu_task_create() returns an asynchronous task
			 * by default (task->synchronous = 0); this task runs
			 * codelet "cl". */
			struct starpu_task *task = starpu_task_create();
			task->cl = &cl;

			/* A single filter was applied to A (respectively B),
			 * so starpu_data_get_sub_data -- a variable-arity
			 * function -- is given one index (hence the "1"):
			 * the slice "by" of A (respectively "bx" of B). */
			task->handles[0] = starpu_data_get_sub_data(A_handle, 1, by);
			task->handles[1] = starpu_data_get_sub_data(B_handle, 1, bx);

			/* Two filters were applied to C, so two indices are
			 * passed, in the same order as the filters.
			 * NB: starpu_data_get_sub_data(C_handle, 1, k) would
			 * return a handle to column k of C.
			 * NB2: starpu_data_get_sub_data(C_handle, 2, bx, by)
			 * is equivalent to
			 * starpu_data_get_sub_data(
			 *     starpu_data_get_sub_data(C_handle, 1, bx), 1, by) */
			task->handles[2] = starpu_data_get_sub_data(C_handle, 2, bx, by);

			/* Non-blocking since task->synchronous = 0. */
			int ret = starpu_task_submit(task);
			if (ret == -ENODEV)
				return ret;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}
	}

	return 0;
}
/* Program entry point: initialize StarPU, register and partition the
 * matrices, submit the tasks, wait, collect the result and clean up.
 * Returns 0 on success, 77 (the standard "test skipped" exit code) when
 * no suitable device is available.
 * Fix over the original: the -ENODEV path after launch_tasks() jumped
 * straight to starpu_shutdown(), leaking A/B/C and leaving the three
 * handles partitioned and registered; cleanup is now common to both
 * paths. */
int main(STARPU_ATTRIBUTE_UNUSED int argc,
STARPU_ATTRIBUTE_UNUSED char **argv)
{
	int ret;
	int exit_code = 0;

	/* start the runtime */
	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* initialize matrices A, B and C and register them to StarPU */
	init_problem_data();

	/* partition matrices into blocks that can be manipulated by the
	 * codelets */
	partition_mult_data();

	/* submit all tasks in an asynchronous fashion */
	ret = launch_tasks();
	if (ret == -ENODEV)
		exit_code = 77;

	/* wait for the termination of all (possibly zero) submitted tasks */
	starpu_task_wait_for_all();

	/* remove the filters applied by the means of starpu_data_map_filters;
	 * afterwards it is not possible to manipulate a subset of C using
	 * starpu_data_get_sub_data until starpu_data_map_filters is called
	 * again on C_handle.  The second argument is the memory node where
	 * the different subsets should be reassembled (main memory here). */
	starpu_data_unpartition(A_handle, STARPU_MAIN_RAM);
	starpu_data_unpartition(B_handle, STARPU_MAIN_RAM);
	starpu_data_unpartition(C_handle, STARPU_MAIN_RAM);

	/* stop monitoring the matrices: after this, it is not possible to
	 * pass them (or any subset) as codelet input/output.  This also
	 * implements a barrier so that the data is put back into main
	 * memory in case it was only available on a GPU for instance. */
	starpu_data_unregister(A_handle);
	starpu_data_unregister(B_handle);
	starpu_data_unregister(C_handle);

	free(A);
	free(B);
	free(C);

	starpu_shutdown();

	return exit_code;
}
#how many nodes and cores
#PBS -W x=NACCESSPOLICY:SINGLEJOB -q mirage -l nodes=1:ppn=12
make mult
STARPU_WORKER_STATS=1 mult
#how many nodes and cores
#PBS -W x=NACCESSPOLICY:SINGLEJOB -q mirage -l nodes=1:ppn=12
make
make vector_scal_task_insert
vector_scal_task_insert
# to force the implementation on a GPU device, by default, it will enable CUDA
......
......@@ -32,7 +32,7 @@ platform.
<P>
Once you are connected, we advise you to add the following lines at
the end of your <tt>.bashrc</tt> file.
</pp>
</p>
<tt><pre>
module purge
......@@ -45,7 +45,7 @@ module load runtime/starpu/1.1.0
<p>
Jobs can be submitted to the platform to reserve a set of nodes and to
execute an application on these nodes. We advise not to reserve nodes
execute an application on those nodes. We advise not to reserve nodes
interactively so as not to block the machines for the other
participants. Here is a script (available
for <a href="files/starpu_machine_display.pbs">download</a>) to submit your
......@@ -118,11 +118,10 @@ following (<a href="files/Makefile">available for download</a>):
<tt><pre>
CFLAGS += $(shell pkg-config --cflags starpu-1.1)
LDFLAGS += $(shell pkg-config --libs starpu-1.1)
vector_scal_task_insert: vector_scal_task_insert.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o
%.o: %.cu
nvcc $(CFLAGS) $< -c -o $@
clean:
rm -f vector_scal_task_insert *.o
vector_scal_task_insert: vector_scal_task_insert.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o
</pre></tt>
<p>
......@@ -200,48 +199,67 @@ to main memory.</li>
</ul>
</p>
</div>
</div>
<!--
<div class="section">
<h3>Data partitioning</h3>
<p>In the previous section, we submitted only one task. We here discuss how to
<p>
In the previous section, we submitted only one task. We here discuss how to
<i>partition</i> data so as to submit multiple tasks which can be executed in
parallel by the various CPUs and GPUs.</p>
parallel by the various CPUs and GPUs.
</p>
<p>Let's examine <tt>examples/basic_examples/mult.c</tt>.
<p>
Let's examine <a href="files/mult.c">mult.c</a>.
<ul>
<li>The computation kernel, <tt>cpu_mult</tt> is a trivial matrix multiplication
<li>
The computation kernel, <tt>cpu_mult</tt> is a trivial matrix multiplication
kernel, which operates on 3 given DSM interfaces. These will actually not be
whole matrices, but only small parts of matrices.</li>
<li><tt>init_problem_data</tt> initializes the whole A, B and C matrices.</li>
<li><tt>partition_mult_data</tt> does the actual registration and partitioning.
whole matrices, but only small parts of matrices.
</li>
<li>
<tt>init_problem_data</tt> initializes the whole A, B and C matrices.
</li>
<li>
<tt>partition_mult_data</tt> does the actual registration and partitioning.
Matrices are first registered completely, then two partitioning filters are
declared. The first one, <tt>vert</tt>, is used to split B and C vertically. The
second one, <tt>horiz</tt>, is used to split A and C horizontally. We thus end
up with a grid of pieces of C to be computed from stripes of A and B.</li>
<li><tt>launch_tasks</tt> submits the actual tasks: for each piece of C, take
the appropriate piece of A and B to produce the piece of C.</li>
<li>The access mode is interesting: A and B just need to be read from, and C
up with a grid of pieces of C to be computed from stripes of A and B.
</li>
<li>
<tt>launch_tasks</tt> submits the actual tasks: for each piece of C, take
the appropriate piece of A and B to produce the piece of C.
</li>
<li>
The access mode is interesting: A and B just need to be read from, and C
will only be written to. This means that StarPU will make copies of the pieces
of A and B along the machines, where they are needed for tasks, and will give to
the tasks some
uninitialized buffers for the pieces of C, since they will not be read from.</li>
<li>The main code initializes StarPU and data, launches tasks, unpartitions data,
uninitialized buffers for the pieces of C, since they will not be read
from.
</li>
<li>
The main code initializes StarPU and data, launches tasks, unpartitions data,
and unregisters it. Unpartitioning is an interesting step: until then the pieces
of C are residing on the various GPUs where they have been computed.
Unpartitioning will collect all the pieces of C into the main memory to form the
whole C result matrix.</li>
whole C result matrix.
</li>
</ul>
</p>
<p>Run the application with the batch scheduler, enabling some statistics:
<p>
Run the application with the <a href="files/mult.pbs">batch scheduler</a>, enabling some statistics:
<tt><pre>
STARPU_WORKER_STATS=1 [PATH]/examples/basic_examples/mult
#how many nodes and cores
#PBS -W x=NACCESSPOLICY:SINGLEJOB -q mirage -l nodes=1:ppn=12
make mult
STARPU_WORKER_STATS=1 mult
</pre></tt>
Figures show how the computation were distributed on the various processing
......@@ -249,6 +267,7 @@ units.
</p>
<!--
<p>
<tt>examples/mult/xgemm.c</tt> is a very similar matrix-matrix product example,
but which makes use of BLAS kernels for much better performance. The <tt>mult_kernel_common</tt> functions
......@@ -271,9 +290,10 @@ only one of them, so you may have to wait a long time, so submit this in
background in a separate terminal), the interesting thing here is that
with <b>no</b> application modification beyond making it use a task-based
programming model, we get multi-GPU support for free!</p>
-->
</div>
<!--
<div class="section">
<h3>More advanced examples</h3>
<p>
......@@ -295,6 +315,7 @@ Thanks to being already using a task-based programming model, MAGMA and PLASMA
have been easily ported to StarPU by simply using <tt>starpu_insert_task</tt>.
</p>
</div>
-->
<div class="section">
<h3>Exercise</h3>
......@@ -305,6 +326,7 @@ of tasks</p>
</div>
<!--
<div class="section">
<h2>Hands-on session part 2: Optimizations</h2>
......@@ -440,6 +462,7 @@ trivially.</p>
</div>
</div>
-->
<div class="section" id="contact">
<h2>Contact</h2>
......@@ -451,18 +474,29 @@ For any questions regarding StarPU, please contact the StarPU developers mailing
<div class="section">
<h3>More performance optimizations</h3>
<p>The starpu documentation <a href="http://runtime.bordeaux.inria.fr/StarPU/starpu.html#Performance-optimization">optimization chapter</a> provides more optimization tips for further reading after the Spring School.</p>
<p>
The StarPU
documentation <a href="http://runtime.bordeaux.inria.fr/StarPU/doc/html/PerformanceFeedback.html">performance
feedback chapter</a> provides more optimization tips for further
reading after this tutorial.
</p>
</div>
<div class="section">
<h3>FxT tracing support</h3>
<p>In addition to online profiling, StarPU provides offline profiling tools,
<p>
In addition to online profiling, StarPU provides offline profiling tools,
based on recording a trace of events during execution, and analyzing it
afterwards.</p>
afterwards.
</p>
<p>The tool used by StarPU to record a trace is called FxT, and can be downloaded from <a href="http://download.savannah.gnu.org/releases/fkt/fxt-0.2.14.tar.gz">savannah</a>. The build process is as usual:
<p>
The tool used by StarPU to record a trace is called FxT, and can be
downloaded
from <a href="http://download.savannah.gnu.org/releases/fkt/fxt-0.2.14.tar.gz">savannah</a>.
The build process is as usual:
</p>
<tt><pre>
......@@ -471,7 +505,9 @@ $ make
$ make install
</pre></tt>
<p>StarPU should then be recompiled with FxT support:</p>
<p>
StarPU should then be recompiled with FxT support:
</p>
<tt><pre>
$ ./configure --with-fxt --prefix=$HOME
......@@ -480,46 +516,62 @@ $ make
$ make install