Commit 48d336bc authored by VAN TOLL Wouter's avatar VAN TOLL Wouter
Browse files

Merge branch 'acceleration-based' into 'master'

Acceleration-based policy definition

See merge request !108
parents 90f6e848 1b8d1d2c
......@@ -35,6 +35,11 @@ Vector2D ForceBasedFunction::GetGradient(const Vector2D& velocity, Agent* agent,
return (velocity - targetV) / world->GetDeltaTime();
}
Vector2D ForceBasedFunction::GetGradientFromCurrentVelocity(Agent* agent, const WorldBase * world) const
{
	// For a force-based function, the gradient at the agent's current velocity
	// is simply -F/m, so we can skip the generic GetGradient() computation.
	const Vector2D force = ComputeForce(agent, world);
	return -force / agent->getMass();
}
Vector2D ForceBasedFunction::GetGlobalMinimum(Agent* agent, const WorldBase* world) const
{
return ComputeTargetVelocity(agent, world);
......@@ -43,10 +48,9 @@ Vector2D ForceBasedFunction::GetGlobalMinimum(Agent* agent, const WorldBase* wor
/// <summary>Parses the parameters of the cost function.</summary>
/// <remarks>ForceBasedFunction no longer has parameters of its own: the agent's mass
/// is now obtained from the Agent itself (via getMass()), so this simply delegates
/// to CostFunction::parseParameters().</remarks>
void ForceBasedFunction::parseParameters(const CostFunctionParameters & params)
{
	CostFunction::parseParameters(params);
}
/// <summary>Computes the velocity that the agent would reach if it applied
/// the force of this function (as an acceleration F/m) for one time step.</summary>
Vector2D ForceBasedFunction::ComputeTargetVelocity(Agent* agent, const WorldBase* world) const
{
	// Single Euler step: v + (F/m) * dt. The mass comes from the agent itself.
	return agent->getVelocity() + ComputeForce(agent, world) / agent->getMass() * world->GetDeltaTime();
}
\ No newline at end of file
......@@ -32,12 +32,6 @@
/// you can then focus on just computing the force itself, and everything else will be handled for you.</remarks>
class ForceBasedFunction : public CostFunction
{
protected:
/// <summary>The mass of the agent according to this function. Most force-based methods assume unit mass.</summary>
/// <remarks>This value is used for converting a force to a gradient.
/// The value corresponds to the XML parameter "agentMass".</remarks>
float agentMass = 1;
protected:
ForceBasedFunction() : CostFunction() {}
virtual ~ForceBasedFunction() {}
......@@ -61,6 +55,13 @@ public:
/// <returns>The gradient of the cost function, derived from the result of ComputeForce().</param>
virtual Vector2D GetGradient(const Vector2D& velocity, Agent* agent, const WorldBase * world) const override;
/// <summary>Computes the gradient of the cost at the agent's current velocity, in a way that is specific for force-based functions.</summary>
/// <remarks>In the case of ForceBasedFunction, this should simply return -F/m, and we can skip some computations from the regular GetGradient() function.</remarks>
/// <param name="agent">An agent in the simulation.</param>
/// <param name="world">The world in which the simulation takes place.</param>
/// <returns>The gradient of the cost function, derived from the result of ComputeForce().</param>
virtual Vector2D GetGradientFromCurrentVelocity(Agent* agent, const WorldBase * world) const override;
/// <summary>Computes the globally optimal velocity, in a way that is specific for force-based functions.</summary>
/// <remarks>In the case of ForceBasedFunction, this is the velocity that the agent would reach if it uses the force for one time step.</remarks>
/// <param name="agent">The agent that would use the requested velocity.</param>
......@@ -69,8 +70,7 @@ public:
virtual Vector2D GetGlobalMinimum(Agent* agent, const WorldBase * world) const override;
/// <summary>Parses the parameters of the cost function.</summary>
/// <remarks>By default, ForceBasedFunction loads the "agentMass" parameter,
/// along with the parameters of the parent class CostFunction.
/// <remarks>By default, ForceBasedFunction simply calls the parent version, i.e. CostFunction::parseParameters().
/// Override this method to let it load any additional parameters of your interest.</remarks>
/// <param name="params">An XML block that contains all parameters of this cost function.</param>
void parseParameters(const CostFunctionParameters & params) override;
......
......@@ -21,18 +21,19 @@
#include <core/agent.h>
#include <core/worldBase.h>
#include "../3rd-party/ORCA/ORCASolver.h"
#include <3rd-party/ORCA/ORCASolver.h>
Agent::Agent(size_t id, const Agent::Settings& settings) :
id_(id), settings_(settings),
position_(0, 0),
velocity_(0, 0),
acceleration_(0, 0),
contact_forces_(0, 0),
preferred_velocity_(0, 0),
next_velocity_(0, 0),
goal_(0, 0),
velocity_without_forces_(0, 0),
contact_forces_(0, 0),
viewing_direction_(0, 0)
viewing_direction_(0, 0),
next_acceleration_(0, 0),
next_contact_forces_(0, 0)
{
// set the seed for random-number generation
RNGengine_.seed((unsigned int)id);
......@@ -58,38 +59,43 @@ void Agent::ComputePreferredVelocity()
preferred_velocity_ = (goal_ - position_).getnormalized() * getPreferredSpeed();
}
void Agent::ComputeNextVelocity(WorldBase* world)
void Agent::ComputeAcceleration(WorldBase* world)
{
next_velocity_ = getPolicy()->ComputeNewVelocity(this, world);
next_acceleration_ = getPolicy()->ComputeAcceleration(this, world);
}
/// <summary>Computes forces with neighboring agents that are currently colliding with this agent.</summary>
/// <remarks>The result is buffered in next_contact_forces_ and applied later in
/// UpdateVelocityAndPosition(), together with the buffered acceleration.</remarks>
void Agent::ComputeContactForces(WorldBase* world)
{
	next_contact_forces_ = getPolicy()->ComputeContactForces(this, world);
}
void Agent::UpdateVelocityAndPosition(float dt)
void Agent::updateViewingDirection()
{
// The concept of a "relaxation time" is already part of the policy.
// So, we may assume that next_velocity_ can be used immediately, without interpolation.
velocity_without_forces_ = next_velocity_;
// apply forces
const float mass = 80.0f; // TODO: parameter?
const auto& acceleration_forces = contact_forces_ / mass;
velocity_ = velocity_without_forces_ + (acceleration_forces * dt);
// update the viewing direction:
// a) as a weighted average of the preferred and actual velocity
// weighted average of the preferred and actual velocity
const auto& vDiff = (2 * velocity_ + preferred_velocity_) / 3;
if (vDiff.sqrMagnitude() > 0.01)
viewing_direction_ = vDiff.getnormalized();
// b) based on the velocity that ignores contact forces
//if (velocity_without_forces_.sqrMagnitude() > 0.01)
// viewing_direction_ = velocity_without_forces_.getnormalized();
}
/// <summary>Euler-integrates the agent for one frame, using the last buffered
/// acceleration and contact forces, then refreshes the viewing direction.</summary>
/// <param name="world">The world in which the simulation takes place (provides dt).</param>
void Agent::UpdateVelocityAndPosition(WorldBase* world)
{
	const float dt = world->GetDeltaTime();

	// clamp the acceleration to the agent's physical limit
	acceleration_ = clampVector(next_acceleration_, getMaximumAcceleration());

	// integrate the velocity; clamp to a maximum speed
	velocity_ = clampVector(velocity_ + (acceleration_*dt), getMaximumSpeed());

	// add contact forces (converted to a velocity change via F/m * dt)
	contact_forces_ = next_contact_forces_;
	velocity_ += contact_forces_ / settings_.mass_ * dt;

	// update the position: a single Euler step
	position_ += velocity_ * dt;

	updateViewingDirection();
}
#pragma endregion
......@@ -113,7 +119,6 @@ void Agent::setPosition(const Vector2D &position)
/// <summary>Overrides the agent's velocity and viewing direction with values
/// provided by an external application.</summary>
/// <param name="velocity">The new velocity of the agent.</param>
/// <param name="viewingDirection">The new viewing direction of the agent.</param>
void Agent::setVelocity_ExternalApplication(const Vector2D &velocity, const Vector2D &viewingDirection)
{
	velocity_ = velocity;
	viewing_direction_ = viewingDirection;
}
......
......@@ -54,6 +54,9 @@ public:
/// <summary>Whether or not the agent should be removed when it has reached its goal.</summary>
bool remove_at_goal_ = false;
/// <summary>The mass of the agent, used when applying an acceleration vector. A commonly used value is 1, so this is not a mass in kilograms.</summary>
float mass_ = 1.0f;
};
private:
......@@ -64,14 +67,15 @@ private:
Vector2D position_;
Vector2D velocity_;
Vector2D preferred_velocity_;
Vector2D next_velocity_;
Vector2D goal_;
Vector2D acceleration_;
Vector2D contact_forces_;
Vector2D velocity_without_forces_;
Vector2D preferred_velocity_;
Vector2D goal_;
Vector2D viewing_direction_;
Vector2D contact_forces_;
Vector2D next_acceleration_;
Vector2D next_contact_forces_;
NeighborList neighbors_;
......@@ -84,6 +88,10 @@ private:
ORCALibrary::Solution orcaSolution_;
private:
void updateViewingDirection();
public:
#pragma region [Simulation-loop methods]
......@@ -104,19 +112,19 @@ public:
/// with a length equal to the agent's preferred speed.</remarks>
void ComputePreferredVelocity();
/// <summary>Uses this agent's Policy to compute a new velocity for the agent.</summary>
/// <summary>Uses this agent's Policy to compute a new acceleration vector for the agent.</summary>
/// <remarks>The result will be stored internally in the agent.</remarks>
/// <param name="world">A reference to the world in which the simulation takes place.</param>
void ComputeNextVelocity(WorldBase* world);
void ComputeAcceleration(WorldBase* world);
/// <summary>Computes forces with neighboring agents that are currently colliding with this agent.</summary>
/// <remarks>The result will be stored internally in the agent.</remarks>
/// <param name="world">A reference to the world in which the simulation takes place.</param>
void ComputeContactForces(WorldBase* world);
/// <summary>Uses the last computed "next velocity" to update the agent's velocity and position.</summary>
/// <param name="dt">The time that is passing in the current simulation frame.</param>
void UpdateVelocityAndPosition(float dt);
/// <summary>Updates the velocity and position of this Agent via Euler integration, using the last computed acceleration and contact forces.</summary>
/// <param name="world">A reference to the world in which the simulation takes place.</param>
void UpdateVelocityAndPosition(WorldBase* world);
/// @}
#pragma endregion
......@@ -142,6 +150,8 @@ public:
inline float getMaximumSpeed() const { return settings_.max_speed_; };
/// <summary>Returns the agent's maximum acceleration.</summary>
inline float getMaximumAcceleration() const { return settings_.max_acceleration_; };
/// <summary>Returns the agent's mass.</summary>
inline float getMass() const { return settings_.mass_; };
/// <summary>Returns a pointer to the Policy that describes the agent's navigation behavior.</summary>
inline Policy* getPolicy() const { return settings_.policy_; }
/// <summary>Returns whether or not the agent wants to be removed from the simulation when it reaches its goal.</summary>
......
......@@ -53,6 +53,11 @@ Vector2D CostFunction::GetGradient(const Vector2D& velocity, Agent* agent, const
return Vector2D(gradientX, gradientY);
}
Vector2D CostFunction::GetGradientFromCurrentVelocity(Agent* agent, const WorldBase* world) const
{
	// Default implementation: evaluate the generic gradient at the agent's
	// current velocity. Subclasses may override this with a cheaper shortcut.
	const Vector2D& currentVelocity = agent->getVelocity();
	return GetGradient(currentVelocity, agent, world);
}
Vector2D CostFunction::GetGlobalMinimum(Agent* agent, const WorldBase* world) const
{
// By default, we approximate the global optimum via sampling.
......
......@@ -164,6 +164,14 @@ public:
/// <returns>A 2D vector denoting the gradient of the cost function at the given velocity.</param>
virtual Vector2D GetGradient(const Vector2D& velocity, Agent* agent, const WorldBase* world) const;
/// <summary>Computes the gradient of the cost at the agent's current velocity.</summary>
/// <remarks>By default, this function simply calls GetGradient(),
/// but a subclass might have a more efficient implementation that computes the same result.</remarks>
/// <param name="agent">An agent in the simulation.</param>
/// <param name="world">The world in which the simulation takes place.</param>
/// <returns>The gradient of the cost function, derived from the result of ComputeForce().</param>
virtual Vector2D GetGradientFromCurrentVelocity(Agent* agent, const WorldBase * world) const;
/// <summary>Computes the global minimum of the cost function, i.e. the velocity with minimum cost.</summary>
/// <remarks>By default, this method uses sampling to approximate the global minimum.
/// Subclasses of CostFunction may choose to implement a better (e.g. closed-form) solution.</remarks>
......
......@@ -433,6 +433,7 @@ bool CrowdSimulator::FromConfigFile_loadSingleAgent(const tinyxml2::XMLElement*
agentElement->QueryFloatAttribute("pref_speed", &settings.preferred_speed_);
agentElement->QueryFloatAttribute("max_speed", &settings.max_speed_);
agentElement->QueryFloatAttribute("max_acceleration", &settings.max_acceleration_);
agentElement->QueryFloatAttribute("mass", &settings.mass_);
agentElement->QueryBoolAttribute("remove_at_goal", &settings.remove_at_goal_);
// position
......
......@@ -40,39 +40,31 @@ float Policy::getInteractionRange() const
return range;
}
/// <summary>Computes and returns a new acceleration vector for a given agent,
/// using the cost functions and optimization method of this Policy.</summary>
/// <param name="agent">The agent for which a new acceleration should be computed.</param>
/// <param name="world">The world in which the simulation takes place.</param>
Vector2D Policy::ComputeAcceleration(Agent* agent, WorldBase * world)
{
	const Vector2D& currentVelocity = agent->getVelocity();
	float dt = world->GetDeltaTime();

	// if the agent does not want to move, return a deceleration vector
	if (agent->getPreferredVelocity().isZero())
		return -currentVelocity / dt;

	// a) Gradient following: compute acceleration from cost-function gradients
	if (optimizationMethod_ == OptimizationMethod::GRADIENT)
		return getAccelerationFromGradient(agent, world);

	// b) Global optimization or sampling:
	// - compute the ideal velocity according to the cost functions
	const Vector2D& bestVelocity = (optimizationMethod_ == OptimizationMethod::GLOBAL
		? getBestVelocityGlobal(agent, world)
		: getBestVelocitySampling(agent, world, samplingParameters_));

	// - convert this to an acceleration using a relaxation time
	//   Note: the relaxation time should be at least the length of a frame.
	return (bestVelocity - currentVelocity) / std::max(relaxationTime_, dt);
}
Vector2D Policy::getAccelerationFromGradient(Agent* agent, WorldBase * world)
......@@ -82,7 +74,7 @@ Vector2D Policy::getAccelerationFromGradient(Agent* agent, WorldBase * world)
// sum up the gradient of all cost functions
Vector2D TotalGradient(0, 0);
for (auto& costFunction : cost_functions_)
TotalGradient += costFunction.second * costFunction.first->GetGradient(CurrentVelocity, agent, world);
TotalGradient += costFunction.second * costFunction.first->GetGradientFromCurrentVelocity(agent, world);
// move in the opposite direction of this gradient
return -1 * TotalGradient;
......
......@@ -87,11 +87,11 @@ struct SamplingParameters
};
/// <summary>A navigation policy that agents can use for local navigation.</summary>
/// <remarks>In each frame of the simulation loop, an agent uses a policy to compute its next velocity to use.
/// <remarks>In each frame of the simulation loop, an agent uses a policy to compute an acceleration vector to apply in the upcoming step.
/// To do this, a policy contains the following main ingredients:<list>
/// <item>One or more *cost functions* that assign costs to velocities.</item>
/// <item>An *optimization method* that describes how to use these cost functions to compute a "best velocity".</item>
/// <item>Additional parameters that describe how to apply this "best velocity" to the agent.</item></list>
/// <item>One or more *cost functions* that assign costs to (hypothetical) velocities.</item>
/// <item>An *optimization method* that describes how to use these cost functions to compute an acceleration.</item>
/// <item>(Optionally) Additional parameters that describe how to apply this acceleration to the agent.</item></list>
/// The UMANS library can reproduce many local navigation algorithms by making particular choices for each ingredient.</remarks>
class Policy
{
......@@ -99,12 +99,13 @@ public:
/// <summary>An enum describing the possible optimization methods of a Policy.</summary>
enum class OptimizationMethod
{
/// <summary>Indicates that a Policy computes a "best velocity" by following the gradient of its cost functions.</summary>
/// <summary>Indicates that a Policy computes an acceleration by following the gradient of its cost functions.</summary>
GRADIENT,
/// <summary>Indicates that a Policy uses sampling to create several candidate velocities,
/// computes the cost of each candidate, and returns the candidate with the lowest cost.</summary>
/// computes the cost of each candidate, and returns an acceleration towards the candidate with the lowest cost.</summary>
SAMPLING,
/// <summary>Indicates that a Policy computes a "best velocity" by global optimization of its cost function.
/// <summary>Indicates that a Policy computes a "best velocity" by global optimization of its cost function,
/// and returns an acceleration towards this best velocity.
/// This option is useful for cost functions that have a closed-form solution for finding the optimum.</summary>
GLOBAL
};
......@@ -122,7 +123,7 @@ private:
/// In addition, specific cost functions may choose to use it (regardless of the optimization method).</summary>
float relaxationTime_ = 0;
/// <summary>A scaling factor to apply to contact forces. Use 0 to disable these forces completely.</summary>
float contactForceScale_ = 5000;
float contactForceScale_ = 5000.f / 80.f; // A constant of 5000 is often used, but in combination with an agent mass of 80 kg.
public:
/// <summary>Creates a Policy with the given details.</summary>
......@@ -135,10 +136,10 @@ public:
/// <summary>Destroys this Policy and all cost functions inside it.</summary>
~Policy();
/// <summary>Computes and returns a new velocity for a given agent, using the cost functions and optimization method of this Policy.</summary>
/// <param name="agent">The agent for which a new velocity should be computed.</param>
/// <summary>Computes and returns a new acceleration vector for a given agent, using the cost functions and optimization method of this Policy.</summary>
/// <param name="agent">The agent for which a new acceleration should be computed.</param>
/// <param name="world">The world in which the simulation takes place.</param>
Vector2D ComputeNewVelocity(Agent* agent, WorldBase* world);
Vector2D ComputeAcceleration(Agent* agent, WorldBase* world);
/// <summary>Computes and returns a 2D vector describing the contact forces experienced by a given agent due to collisions.
/// This force is already scaled by the scaling factor stored in this Policy.</summary>
......
......@@ -103,10 +103,10 @@ void WorldBase::DoStep()
for (int i = 0; i < n; ++i)
agents_[i]->ComputePreferredVelocity();
// 4. perform local navigation for each agent, to compute a new velocity for them
// 4. perform local navigation for each agent, to compute an acceleration vector for them
#pragma omp parallel for
for (int i = 0; i < n; ++i)
agents_[i]->ComputeNextVelocity(this);
agents_[i]->ComputeAcceleration(this);
// 5. compute contact forces for all agents
#pragma omp parallel for
......@@ -131,7 +131,7 @@ void WorldBase::DoStep_MoveAllAgents()
{
	// Euler-integrate all agents in parallel; each agent reads dt from the world.
#pragma omp parallel for
	for (int i = 0; i < (int)agents_.size(); i++)
		agents_[i]->UpdateVelocityAndPosition(this);
}
#pragma region [Finding, adding, and removing agents]
......
......@@ -89,7 +89,7 @@ void WorldToric::DoStep_MoveAllAgents()
Agent* agent = agents_[i];
// update the agent's velocity and position as usual
agent->UpdateVelocityAndPosition(delta_time_);
agent->UpdateVelocityAndPosition(this);
// if the agent has crossed the bounding rectangle, warp it to the other side
float x = agent->getPosition().x;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment