Commit 4f782f6e authored by EYRAUD-DUBOIS Lionel's avatar EYRAUD-DUBOIS Lionel

Merge branch 'master' into comms

parents 59dee724 57d2e1a8
......@@ -3,7 +3,6 @@ cmake_minimum_required(VERSION 3.0.2)
project(pmtool)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR})
add_definitions("--std=c++11")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g")
include_directories("include")
option(PROFILING "Include profiling information" OFF)
......@@ -15,6 +14,14 @@ if(PROFILING)
SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -pg")
endif()
option(DEBUG "Include debug information" ON)
if(DEBUG)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g")
else()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
endif()
find_package(LibRec REQUIRED)
include_directories(${LIBREC_INCLUDE_DIRS})
......@@ -58,3 +65,5 @@ if(THREADS_FOUND)
target_link_libraries(pmtool Threads::Threads)
endif()
add_executable(instanceinfo instanceinfo.cpp)
target_link_libraries(instanceinfo core)
......@@ -188,7 +188,7 @@ IF(CPLEX_FOUND)
SET(CPLEX_INCLUDE_DIRS ${CPLEX_INCLUDE_DIR} ${CPLEX_CONCERT_INCLUDE_DIR})
SET(CPLEX_LIBRARIES ${CPLEX_CONCERT_LIBRARY} ${CPLEX_ILOCPLEX_LIBRARY} ${CPLEX_LIBRARY} )
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
SET(CPLEX_LIBRARIES "${CPLEX_LIBRARIES};m;pthread")
SET(CPLEX_LIBRARIES "${CPLEX_LIBRARIES};m;pthread;dl")
ENDIF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
ENDIF(CPLEX_FOUND)
......
# This module finds librec.
#
# User can give LIBREC_ROOT_DIR as a hint stored in the cmake cache.
# User can give LIBREC_INSTALL_DIR as a hint stored in the cmake cache.
#
# It sets the following variables:
# LIBREC_FOUND - Set to false, or undefined, if librec isn't found.
# LIBREC_INCLUDE_DIRS - include directory
# LIBREC_LIBRARIES - library files
# include(LibFindMacros)
# Dependencies
# libfind_package(Magick++ Magick)
# Use pkg-config to get hints about paths
# libfind_pkg_check_modules(librec_PKGCONF ImageMagick++)
set(LIBREC_INSTALL_DIR "" CACHE PATH "Librec install directory")
......@@ -35,11 +28,10 @@ find_library(LIBREC_LIBRARY
message(STATUS "librec library: ${LIBREC_LIBRARY}")
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
# handle the QUIETLY and REQUIRED arguments and set LIBREC_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(LIBREC DEFAULT_MSG
find_package_handle_standard_args(LibRec DEFAULT_MSG
LIBREC_LIBRARY LIBREC_INCLUDE_DIR)
mark_as_advanced(LIBREC_INCLUDE_DIR LIBREC_LIBRARY )
......
# Input format specification
`pmtool` accepts two different input formats for specifying instances: an in-house format,
and one based on `librec` which allows easy interaction with `StarPU`. Examples of each format
can be found in the `data/` directory: files with the `.pm` extension follow the in-house format, files with the `.rec`
extension follow the recfile format.
and one based on `librec` which allows easy interaction with `StarPU`. Examples of each format
can be found in the `data/` directory: files with the `.pm` extension follow the in-house format, files with the `.rec`
extension follow the recfile format.
## In-house format
In the default in-house format, all the information about an instance is specified in one file,
specified with several arrays. Each array starts with a `[`, ends with a `]`, and contains a comma-separated
list of values.
A file must contain, in this order, the following arrays:
+ the number of resources of each type. The length of this array specifies the number of resource
types, and its $`j`$-th coordinate indicates how many resources of type $`j`$ are available.
+ one array per resource type, specifying the execution time of tasks on this resource type.
All these arrays should have the same length, equal to the number of task types. The $`i`$-th coordinate of the
$`j`$-th array specifies the execution time of task type $`i`$ on resource type $`j`$.
+ one array specifies the type of each task. The length of this array is equal to the number $`n`$ of tasks in
the instance, and the $`k`$-th coordinate specifies the type of task $`k`$. Values in this array should be
between 0 and the length of the "execution time" arrays minus one.
+ one array per task, specifying the precedence constraints. The $`k`$-th array indicates the indices (between $`1`$ and $`n`$)
of the predecessors of task $`k`$.
Arrays do not have to be separated, however it is good practice in my opinion to keep one per line.
Task numbering can be arbitrary (and does not have to follow dependencies), `pmtool` automatically computes a
topological ordering of the graph anywhere it is needed, and thus checks that there is no cycle in the graph.
In the default in-house format, all the information about an instance
is specified in one file, specified with several arrays. Each array
starts with a `[`, ends with a `]`, and contains a comma-separated
list of values.
A file must contain, in this order, the following arrays:
+ the number of resources of each type. The length of this array
specifies the number of resource types, and its $`j`$-th coordinate
indicates how many resources of type $`j`$ are available.
+ one array per resource type, specifying the execution time of tasks on this resource type.
All these arrays should have the same length, equal to the number of task types. The $`i`$-th coordinate of the
$`j`$-th array specifies the execution time of task type $`i`$ on resource type $`j`$.
+ one array specifies the type of each task. The length of this array is equal to the number $`n`$ of tasks in
the instance, and the $`k`$-th coordinate specifies the type of task $`k`$. Values in this array should be
between 0 and the length of the "execution time" arrays minus one.
+ one array per task, specifying the precedence constraints. The $`k`$-th array indicates the indices (between $`1`$ and $`n`$)
of the predecessors of task $`k`$.
Arrays do not have to be separated, however it is good practice in my opinion to keep one per line.
Task numbering can be arbitrary (and does not have to follow dependencies), `pmtool` automatically computes a
topological ordering of the graph anywhere it is needed, and thus checks that there is no cycle in the graph.
## Recfile format
This format uses a more standard representation (the same that Debian uses in its package system, and that StarPU uses
as an output). It is more verbose, more extensible, and allows to differentiate between the graph and the
platform description. `pmtool` automatically uses this format if the file name ends with `.rec`.
This format uses a more standard representation (the same that Debian uses in its package system, and that StarPU uses
as an output). It is more verbose, more extensible, and allows to differentiate between the graph and the
platform description. `pmtool` automatically uses this format if the file name ends with `.rec`.
In the recfile format, information is specified as *records*, which contain one *key/value* pair per line. The precise
format is `Key: Value`, where the value may be anything (and may contain spaces). The key and value are separated by `: `.
Records are separated by double newlines.
In the recfile format, information is specified as *records*, which contain one *key/value* pair per line. The precise
format is `Key: Value`, where the value may be anything (and may contain spaces). The key and value are separated by `: `.
Records are separated by double newlines.
### The instance file
The format of the instance file is designed as to accept the `tasks.rec` files as provided by StarPU's export tools.
The instance file contains one record per task, with the following keys:
+ `Name` and `Footprint` specify the task type. All tasks with identical names and footprints belong to the same task type.
+ `JobId` is an integer identifier of the task
+ `DependsOn` contains a space-separated list of the `JobId`s of the predecessor of the current task.
+ `EstimatedTime` contains a space-separated list of the execution time of this task on each resource (if there are
The format of the instance file is designed as to accept the `tasks.rec` files as provided by StarPU's export tools.
The instance file contains one record per task, with the following keys:
+ `Name` and `Footprint` specify the task type. All tasks with identical names and footprints belong to the same task type.
+ `JobId` is an integer identifier of the task
+ `DependsOn` contains a space-separated list of the `JobId`s of the predecessor of the current task.
+ `EstimatedTime` contains a space-separated list of the execution time of this task on each resource (if there are
several resources of the same type, the corresponding value is repeated as many times as needed). The number of values
in this field should be the same for all tasks.
The fields `Name`, `Footprint` and `JobId` are compulsory. The field `EstimatedTime` is compulsory if no platform file
is provided. The field `DependsOn` is optional (defaults to no dependencies). Other optional fields exist:
+ `SubmitOrder` provides another, more robust identifier from StarPU, which allows to identify tasks from one StarPU run
to the next. It is used as an identifier of tasks in exported schedules if the `--submit-order` option is specified
on the command line. It should be an integer.
+ `Tag` is a custom information provided by the application programmer in StarPU, providing another way of identifying tasks
in a user-defined manner. It is appended to the task identifier (either job id or submit order) if the
`--use-tags` option is specified on the command line.
+ `WorkerId`, `StartTime` and `EndTime` allow to specify a schedule, providing a convenient way to compare
the actual schedule from StarPU to schedules computed with `pmtool`'s algorithms. If these fields are specified for
all tasks, then a shared data is added to the instance with the key `rl`; the schedule can then be recovered
by adding the `rep` (reproduce) algorithm to the command line, with the option `key=rl`, in the following way:
```
The fields `Name`, `Footprint` and `JobId` are compulsory. The field `EstimatedTime` is compulsory if no platform file
is provided. The field `DependsOn` is optional (defaults to no dependencies). Other optional fields exist:
+ `SubmitOrder` provides another, more robust identifier from StarPU, which allows to identify tasks from one StarPU run
to the next. It is used as an identifier of tasks in exported schedules if the `--submit-order` option is specified
on the command line. It should be an integer.
+ `Tag` is a custom information provided by the application programmer in StarPU, providing another way of identifying tasks
in a user-defined manner. It is appended to the task identifier (either job id or submit order) if the
`--use-tags` option is specified on the command line.
+ `WorkerId`, `StartTime` and `EndTime` allow to specify a schedule, providing a convenient way to compare
the actual schedule from StarPU to schedules computed with `pmtool`'s algorithms. If these fields are specified for
all tasks, then a shared data is added to the instance with the key `rl`; the schedule can then be recovered
by adding the `rep` (reproduce) algorithm to the command line, with the option `key=rl`, in the following way:
```
pmtool tasks.rec -a rep:key=rl
```
+ `Handles`, `Modes` and `Sizes` allow to specify which data this task
......@@ -85,28 +88,30 @@ pmtool tasks.rec -a rep:key=rl
wrote to this handle. Tasks which write to a handle may change its
size.
Because of internal behavior of StarPU, this format allows *virtual* tasks to be added to the instance. A task is virtual
if it has no `Name` field. `JobId` is still compulsory for virtual tasks. Dependencies from real tasks go *through* virtual tasks
to make them depend on any real task that this virtual task depends on.
Because of internal behavior of StarPU, this format allows *virtual* tasks to be added to the instance. A task is virtual
if it has no `Name` field. `JobId` is still compulsory for virtual tasks. Dependencies from real tasks go *through* virtual tasks
to make them depend on any real task that this virtual task depends on.
### The platform file
To avoid specifying `EstimatedTime` for all tasks, it is possible to specify the platform file separately. In StarPU,
this file is provided by the `starpu_perfmodel_recdump` utility in the `tools/` directory. This file contains two parts:
first the number of resources of each type, then the timings of each type of task.
The first part starts with ```%rec: worker_count``` on a separate line (this is a special feature of the recfile format to separate a file
into different databases). It then contains one record per resource type, with two fields:
+ `Architecture` provides an identifier for this resource type
+ `NbWorkers` should contain an integer specifying the number of resources of this type
The second part starts with ```%rec: timing``` on a separate line, and contains one record for each task type/resource type combination.
Records contain the following fields:
+ `Name` and `Footprint` represent the task type, similarly to the instance file.
+ `Architecture` represent the resource type, as specified in the first part.
+ `Mean` contain the execution time (which is computed as an average by StarPU).
+ Files produced by StarPU also contain an `Stddev` field, which contains the standard deviation
of the measurements, but this field is ignored by `pmtool`.
To avoid specifying `EstimatedTime` for all tasks, it is possible to specify the platform file separately. In StarPU,
this file is provided by the `starpu_perfmodel_recdump` utility in the `tools/` directory. This file contains two parts:
first the number of resources of each type, then the timings of each type of task.
The first part starts with ```%rec: worker_count``` on a separate line (this is a special feature of the recfile format to separate a file
into different databases). It then contains one record per resource type, with two fields:
+ `Architecture` provides an identifier for this resource type
+ `NbWorkers` should contain an integer specifying the number of resources of this type
The second part starts with ```%rec: timing``` on a separate line, and contains one record for each task type/resource type combination.
Records contain the following fields:
+ `Name` and `Footprint` represent the task type, similarly to the instance file.
+ `Architecture` represent the resource type, as specified in the first part.
+ `Mean` contain the execution time (which is computed as an average by StarPU).
+ Files produced by StarPU also contain an `Stddev` field, which contains the standard deviation
of the measurements, but this field is ignored by `pmtool`.
......
This diff is collapsed.
......@@ -6,6 +6,8 @@
#include <OnlineQA.h>
#include <OnlineECT.h>
#include <OnlineERLS.h>
#include <OnlineLG.h>
#include <OnlineMG.h>
#include "algorithm.h"
#include "listAlgorithm.h"
......@@ -20,6 +22,7 @@
#ifdef WITH_CPLEX
#include "CriticalPath.h"
#include "AreaBound.h"
#include "DepBound.h"
#include "IterDepBound.h"
......@@ -39,6 +42,7 @@ static const int opt_bw = 15;
static const int opt_thread = 16;
static const int opt_tags = 17;
static const int opt_submit = 18;
static const int opt_export_type = 19;
static struct option long_options[] = {
......@@ -62,6 +66,7 @@ static struct option long_options[] = {
{"threads", optional_argument, 0, opt_thread},
{"use-tags", no_argument, 0, opt_tags},
{"submit-order", no_argument, 0, opt_submit},
{"export-type", no_argument, 0, opt_export_type},
{0, 0, 0, 0 }
};
......@@ -180,6 +185,9 @@ void ProgramOptions::parse(int argc, char** argv) {
case opt_submit:
useSubmitOrder = true;
break;
case opt_export_type:
outputTypeInExport = true;
break;
case '?':
case 'h':
usage();
......@@ -248,6 +256,7 @@ void ProgramOptions::displayBoundList() {
cerr << "Bounds available:" << endl;
#ifdef WITH_CPLEX
cerr << " area \t\t area bound, hybridized" << endl;
cerr << " cp \t\t critical path" << endl;
cerr << " dep \t\t global area bound with dependencies, hybridized" << endl;
cerr << " iterdep \t global area bound with dependencies, hybridized + iteratively adding local area bounds" << endl;
cerr << " mixed \t compute time to end for all tasks, and area bounds for the beginning of the graph" << endl;
......@@ -273,6 +282,8 @@ Bound* createBound(const string& name, const AlgOptions& options) {
bound = new HybridBound(new AreaStart(options), options);
if(name == "interval")
bound = new IntervalBound(options);
if(name == "cp")
bound = new CriticalPath(options);
#endif
if(bound == NULL){
cerr << "Unknown bound " << name <<". For a list of bounds, use --help" << endl;
......@@ -307,6 +318,10 @@ Algorithm* createAlg(const string& name, const AlgOptions& options) {
alg = new OnlineECT(options);
if(name == "erls")
alg = new OnlineERLS(options);
if(name == "lg")
alg = new OnlineLG(options);
if(name == "mg")
alg = new OnlineMG(options);
#ifdef WITH_CPLEX
if(name == "lp")
alg = new SchedLPIndep(options);
......
......@@ -14,7 +14,7 @@ compile with `make`.
All bounds require CPLEX, which can be specified with the
`-DCPLEX_ROOT_DIR=<...>` option to `cmake`.
Depends on `librec-dev`, and CPLEX depends on libgpg-error-dev and
Depends on `librec-dev` ([See here](https://www.gnu.org/software/recutils/)), and CPLEX depends on libgpg-error-dev and
libgcrypt-dev.
## Table of contents
......@@ -292,11 +292,17 @@ resources. All of these only work with two types of resources.
dual approximation, dynamic-programming based algorithm. Based on the simpler algorithm from
[Scheduling Independent Tasks on Multi-cores with GPU Accelerators](https://hal.inria.fr/hal-00921357), described
in Section 5.2. Also described in [Scheduling Independent Tasks on Multi-cores with GPU Accelerators](https://hal.inria.fr/hal-01081625),
Section 5.2.
Section 5.2. Additional option: `disc` selects the discretization precision (default 3.0)
+ `dualhp`
dual approximation, heteroprio-based greedy algorithm. Inspired from [Scheduling Data Flow Program in XKaapi: A
New Affinity Based Algorithm for Heterogeneous Architectures](https://hal.inria.fr/hal-01081629v1), with only the
second part of the schedule.
+ `dp3demi`
dual approximation, dynamic programming based algorithm. Based
on APPROX-3/2 from
[Scheduling Independent Moldable Tasks on Multi-Cores with GPUs](https://hal.inria.fr/hal-01516752),
but restricted to the non moldable case. Should also appear as a
more generic (2q+1)/(2q) approximation in IJFCS.
+ `accel`
Accel algorithm from [Scheduling Independent Tasks on Multi-cores with GPU Accelerators](https://hal.inria.fr/hal-01081625),
Section 4.
......@@ -324,6 +330,9 @@ resources. All of these only work with two types of resources.
Implements the CLB2C strategy (see [Considerations on distributed load balancing for fully heterogeneous machines: Two particular cases.](https://doi.org/10.1109/IPDPSW.2015.36)).
Not exactly equivalent to this strategy, since `indep` performs list scheduling based only on the
assignment to different types of resources.
+ `minmin`
Implements the MinMin strategy (see [A Comparison of Eleven Static Heuristics for Mapping a Class of Independent Tasks onto Heterogeneous Distributed Computing Systems](https://doi.org/10.1006/jpdc.2000.1714).)
+ `rank` (except for style `strict`)
as for the previous algorithms
+ `dosort` (except for `strict`, default `yes`)
......@@ -437,7 +446,10 @@ Reminder: all bounds require to compile with CPLEX.
Simple area bound, with one variable per type of tasks and type of
resource. Very quick.
* `dep`: mixed area-dependency bound
* `cp`
Simple critical path bound. Very quick.
* `dep`: mixed area-dependency bound. Always better than `max(area, cp)`, but slower
Options:
+ `mode`: `normal` (default) or `concurrent`
if concurrent, uses Cplex's concurrent solving mode
......@@ -512,6 +524,10 @@ often:
* `submit-order`
Use the `SubmitOrder` field in `instance.rec` input files instead of `JobId`.
* `export-type`
When exporting in `.rec` format with the `export=` option of algorithms, specify
all workers of this type instead of the particular worker.
* `--bw <BW>`
Specify the bandwidth used for communications. Unit is not
specified, since it depends on the unit used to specify data sizes
......
......@@ -5,6 +5,7 @@ set(BOUNDS_SRC
HybridBound.cpp
IntervalBound.cpp
IterDepBound.cpp
CriticalPath.cpp
)
add_library(bounds ${BOUNDS_SRC})
#include <vector>
#include "algorithm.h"
#include "CriticalPath.h"
#include "util.h"
using namespace std;
// Constructor for the critical-path bound. The options argument is
// currently ignored: this bound has no tunable parameters.
CriticalPath::CriticalPath(const AlgOptions & options) {
}
// Critical-path lower bound on the makespan of an instance.
// computeMinRank() presumably returns, for each task, a rank value under
// minimum execution times (confirm in instance.cpp); the bound is the
// largest of these values.
double CriticalPath::compute(Instance& ins) {
    const std::vector<double> minRanks = ins.computeMinRank();
    return getMax(minRanks);
}
......@@ -115,6 +115,21 @@ double AlgOptions::asDouble(const string& key, const double def) const {
return (stod(opt->second));
}
// Overwrite v with the value stored under `key`, parsed as a double.
// Leaves v untouched when the option is absent, so v keeps its default.
void AlgOptions::updateValue(double & v, const std::string &key) const {
if(isPresent(key))
v = asDouble(key);
}
// Overwrite v with the value stored under `key`, parsed as an int.
// Leaves v untouched when the option is absent, so v keeps its default.
void AlgOptions::updateValue(int & v, const std::string &key) const {
if(isPresent(key))
v = asInt(key);
}
// Overwrite v with the raw string value stored under `key`.
// Leaves v untouched when the option is absent, so v keeps its default.
void AlgOptions::updateValue(string & v, const std::string &key) const {
if(isPresent(key))
v = asString(key);
}
// Convenience constructor: immediately parses the supplied option string.
// The accepted grammar is whatever parse() implements.
AlgOptions::AlgOptions(string toParse) {
parse(toParse);
}
......
......@@ -332,6 +332,9 @@ vector<double> Instance::computeRanks(vector<double> wbar, int to, int from) {
int i = topOrder[j];
// Compute max
double m = -std::numeric_limits<double>::infinity();
if(revDep[i].empty())
m = 0;
else
for(int k = 0; k < (int) revDep[i].size(); k++)
if(rank[revDep[i][k]] > m)
m = rank[revDep[i][k]];
......
......@@ -105,8 +105,8 @@ string ExportToString::getResult() {
/* ExportAlloc: exports in .rec format */
ExportAlloc::ExportAlloc(string filename, Instance* ins, bool submitOrder)
: output(filename), instance(ins), submitOrder(submitOrder) {
ExportAlloc::ExportAlloc(string filename, Instance* ins, bool submitOrder, bool outputType)
: output(filename), instance(ins), submitOrder(submitOrder), outputType(outputType) {
}
void ExportAlloc::onSchedule(int i, int w, double s, double f) {
......@@ -128,10 +128,15 @@ void ExportAlloc::onSchedule(int i, int w, double s, double f) {
int type = instance->getType(w);
output << "Workers: ";
if (outputType && instance->workerIDs[type].size() > 1) {
output << "Workers:";
for(auto& i: instance->workerIDs[type])
output << i << " ";
output << " " << i;
output << endl;
} else {
output << "SpecificWorker: " << w << endl;
}
if(instance->workerNames.size() > 0) {
output << "Architecture: " << instance->workerNames[type] << endl;
......
#include "util.h"
#include "cmath"
#include <cmath>
#include <sstream>
using namespace std;
......
#ifndef CRITICALPATH_H
#define CRITICALPATH_H
#include "algorithm.h"
// Lower bound on the makespan based on the critical path of the task
// graph. Selected on the command line as bound "cp".
class CriticalPath : public Bound {
public:
// The options argument is currently unused by this bound.
CriticalPath(const AlgOptions & options);
// Returns the critical-path bound for the given instance.
double compute(Instance& ins);
// Identifier used to select this bound from the command line.
std::string name() { return "cp"; };
};
#endif
......@@ -4,7 +4,6 @@
#include "instance.h"
#include "algorithm.h"
#include "algoptions.h"
#include "listAlgorithm.h"
class Dmdas : public Algorithm {
......
#ifndef INDEPDP2_H
#define INDEPDP2_H
#include "IndepAllocator.h"
#include "IndepDualGeneric.h"
#include "instance.h"
#include <vector>
extern double lowerBoundTwoResource(Instance& ins, std::vector<int> taskSet,
double CPUload = 0, double GPUload = 0);
class IndepDP2 : public IndepAllocator {
class IndepDP2 : public IndepDualGeneric {
protected:
double tryGuess(Instance &, std::vector<int> taskSet, double maxGPUload, double maxlen,
IndepResult & result, bool getResult);
double epsilon = 0.01;
double tryGuess(Instance &, std::vector<int> taskSet, std::vector<double>& loads,
double maxlen, IndepResult & result, bool getResult);
double discretizationConstant = 3.0;
#ifdef WITH_CPLEX
bool solveWithCplex;
bool cplexUseDiscretizedValues;
#endif
public:
IndepDP2(const AlgOptions& opt);
IndepResult compute(Instance &, std::vector<int> &taskSet, std::vector<double> &loads);
};
......
#ifndef INDEPDP3DEMI_H
#define INDEPDP3DEMI_H
#include "IndepDualGeneric.h"
#include "instance.h"
#include <vector>
// Independent-task allocator based on the APPROX-3/2 dual-approximation
// dynamic programming algorithm, restricted to the non-moldable case
// (selected as algorithm "dp3demi" on the command line; see the README
// for the reference paper).
class IndepDP3Demi : public IndepDualGeneric {
protected:
// One dual-approximation step, called by IndepDualGeneric::compute():
// attempts to schedule `taskSet` within makespan `target`; fills
// `result` when getResult is true. NOTE(review): exact contract is
// defined by IndepDualGeneric::tryGuess -- confirm there.
double tryGuess(Instance &, std::vector<int> taskSet, std::vector<double>& loads,
double target, IndepResult & result, bool getResult);
// Precision constant of the dynamic-program discretization
// (presumably set via the `disc` option -- confirm in the .cpp).
double discretizationConstant = 3.0;
#ifdef WITH_CPLEX
// When CPLEX is available, the guess can be solved exactly instead of
// via the DP. NOTE(review): semantics inferred from names -- confirm.
bool solveWithCplex;
bool cplexUseDiscretizedValues;
#endif
public:
IndepDP3Demi(const AlgOptions& opt);
};
#endif
#ifndef INDEPDUALGENERIC_H
#define INDEPDUALGENERIC_H
#include "IndepAllocator.h"
#include "instance.h"
#include <vector>
// Lower bound on the makespan of `taskSet` on a two-resource platform,
// given already-committed CPU and GPU loads.
extern double lowerBoundTwoResource(Instance& ins, std::vector<int> taskSet,
double CPUload = 0, double GPUload = 0);
// Common skeleton for dual-approximation independent-task allocators:
// compute() drives the search over makespan guesses and delegates each
// guess to the subclass-provided tryGuess().
class IndepDualGeneric : public IndepAllocator {
protected:
// Attempt to schedule `taskSet` within makespan `maxlen`; fills
// `result` when getResult is true. Implemented by subclasses
// (e.g. IndepDP2, IndepDP3Demi).
virtual double tryGuess(Instance &, std::vector<int> taskSet, std::vector<double>& loads,
double maxlen, IndepResult & result, bool getResult) = 0;
// Relative precision at which the search over guesses stops.
// NOTE(review): inferred from the name -- confirm in the .cpp.
double epsilon = 0.01;
public:
IndepDualGeneric(const AlgOptions& opt);
IndepResult compute(Instance &, std::vector<int> &taskSet, std::vector<double> &loads);
};
#endif
//
// Created by eyraud on 25/03/19.
// Based on A Comparison of Eleven Static Heuristics for
// Mapping a Class of Independent Tasks onto
// Heterogeneous Distributed Computing Systems
// doi:10.1006/jpdc.2000.1714
#ifndef PMTOOL_MINMIN_H
#define PMTOOL_MINMIN_H
#include "IndepAllocator.h"
// MinMin heuristic for scheduling independent tasks (selected as
// algorithm "minmin" on the command line).
class IndepMinMin : public IndepAllocator {
private:
// Instance being scheduled. NOTE(review): non-owning pointer,
// presumably set by compute() -- confirm lifetime in the .cpp.
Instance* ins;
std::vector<int> workerIndices;
// Best candidate worker per worker type.
std::vector<int> bestWorkers; /* length = nb worker types */
// Index of the worker of the given type that becomes free earliest,
// given current loads. NOTE(review): inferred from the name.
int getEarliestWorker(std::vector<double> &loads, int type);
// Completion time of `task` on a worker of `workerType` under the
// current loads. NOTE(review): inferred from the name.
inline double endTime(std::vector<double> &loads, int workerType, int task);
public:
IndepResult compute(Instance &ins, std::vector<int> &taskSet, std::vector<double> &loads) override;
IndepMinMin(const AlgOptions &options);
};
#endif //PMTOOL_MINMIN_H
//
// Created by eyraud on 11/12/17.
//
#ifndef PMTOOL_ONLINELG_H