diff --git a/.gitmodules b/.gitmodules
index a00a11c278a67d1840b7129353b5b300041d5646..41eb31850adb44dd081f02c3bdf24db19202f9ce 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,7 @@
 [submodule "modules/Motor_Controller"]
 	path = modules/Motor_Controller
 	url = git@gitlab.inria.fr:spring/wp6_robot_behavior/Motor_Controller.git
+
+[submodule "modules/social_space_navigation_models"]
+	path = modules/social_space_navigation_models
+	url = git@gitlab.inria.fr:creinke/social-space-navigation-models.git
diff --git a/modules/__init__.py b/modules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/mpi_sim/components/__init__.py b/mpi_sim/components/__init__.py
index b63bf80c05140fc01a571104e46f26bab21b1188..c1fcfa4232e49781fc33d0152e05e9bdf7d0213f 100644
--- a/mpi_sim/components/__init__.py
+++ b/mpi_sim/components/__init__.py
@@ -3,9 +3,13 @@ try:
     from mpi_sim.components.ari_navigation import ARINavigation
 except ImportError:
     pass
+# Uncomment to debug
+# from mpi_sim.components.ari_navigation import ARINavigation
+
 from mpi_sim.components.gaze_generator import GazeGenerator
 from mpi_sim.components.object_detector import ObjectDetector
-from mpi_sim.components.occupany_grid_mapping import OccupancyGridMapping
+from mpi_sim.components.occupancy_grid_mapping import OccupancyGridMapping
+from mpi_sim.components.social_mapping import SocialMapping
 from mpi_sim.components.social_force_navigation import SocialForceNavigation
 from mpi_sim.components.speech_generator import SpeechGenerator
 from mpi_sim.components.raycast_sensor import RayCastSensor
diff --git a/mpi_sim/components/ari_navigation.py b/mpi_sim/components/ari_navigation.py
index 6663b12cc42c9fea90cf6c60a757193605b7d987..71ef894cb962cbc02d41eb2b2ceef16bed15cb8e 100644
--- a/mpi_sim/components/ari_navigation.py
+++ b/mpi_sim/components/ari_navigation.py
@@ -9,7 +9,7 @@ import yaml
 from collections import namedtuple
 from mpi_sim.utils.box2d_utils import constraint_angle
 import cv2
-
+import os
 # TODO: need group identification for SocialMPC
 
 
@@ -69,6 +69,54 @@ class ARINavigation(GeneratorComponent):
         dc.max_human_nb = 20
         dc.max_groups_world = 10
 
+        dc.robot_config = mpi_sim.AttrDict()
+        dc.robot_config.base_footprint2base_link = [0., 0., 0.125]
+        dc.robot_config.base_link2wheel_left_link = [0., 0.220, 0.0]
+        dc.robot_config.base_link2wheel_right_link = [0., -0.220, 0.0]
+        dc.robot_config.base_link2caster_left_link = [-0.455, 0.102, -0.080]
+        dc.robot_config.base_link2caster_right_link = [-0.455, -0.102, -0.080]
+        dc.robot_config.base_link2head_1_link = [-0.226, 0.000, 1.192]
+        dc.robot_config.base_link2head_2_link = [-0.183, 0.000, 1.284]
+        dc.robot_config.base_link2head_front_camera_link = [-0.120, -0.000, 1.466]
+        dc.robot_config.base_link2head_front_camera_optical_frame = [-0.120, -0.000, 1.466]
+        dc.robot_config.base_footprint_radius = 0.3
+        dc.robot_config.wheel_radius = 0.125
+        dc.robot_config.wheel_width = 0.075
+        dc.robot_config.wheel_separation = 0.44
+        dc.robot_config.wheel_torque = 6.0
+        dc.robot_config.wheel_velocity = 1.0
+        dc.robot_config.caster_radius = 0.045
+        dc.robot_config.caster_width = 0.055
+        dc.robot_config.caster_separation_x = 0.91
+        dc.robot_config.caster_offset_y = 0.1025
+        dc.robot_config.caster_offset_z = -0.08
+        dc.robot_config.total_mass = 54.893
+        dc.robot_config.wheel_mass = 1.65651
+        dc.robot_config.L1 = [[0, 0, 1, 0], [1, 0, 0, 0.182], [0, 1, 0, -0.063], [0, 0, 0, 1]]
+        dc.robot_config.L2 = [[0, 0, -1, 0.093], [0, 1, 0, -0.043], [1, 0, 0, 0], [0, 0, 0, 1]]
+        dc.robot_config.L3 = [[1, 0, 0, 0], [0, 1, 0, 0.225], [0, 0, 1, -1.192], [0, 0, 0, 1]]
+        dc.robot_config.local_map_depth = 3.0
+        dc.robot_config.local_map_px_size = 0.05
+        dc.robot_config.max_speed_motor_wheel = 1.0
+        dc.robot_config.max_forward_speed = 0.5
+        dc.robot_config.max_backward_speed = -0.1
+        dc.robot_config.max_forward_acc = 0.2
+        dc.robot_config.max_ang_vel = 0.5
+        dc.robot_config.max_ang_acc = 0.4
+        dc.robot_config.max_pan_angle = 1.309
+        dc.robot_config.min_pan_angle = -1.309
+        dc.robot_config.max_tilt_angle = 0.4363
+        dc.robot_config.min_tilt_angle = -0.2618
+        dc.robot_config.max_speed_motor_head = 3.0
+        dc.robot_config.head_friction = 1.0
+        dc.robot_config.head_damping = 0.5
+        dc.robot_config.head_cam_fov_h = 62.0
+        dc.robot_config.head_cam_fov_v = 48.0
+        dc.robot_config.base_cam_fov_h = 360.0
+        dc.robot_config.has_pan = True
+        dc.robot_config.has_tilt = False
+
         return dc
     
 
@@ -99,13 +147,14 @@ class ARINavigation(GeneratorComponent):
 
         # TODO: remove the need for configuration in yml files
         #       add the configuration in the default config under a social_mpc and robot sub dictionary
+
         config_filename = pkg_resources.resource_filename('modules', 'social_mpc.yaml')
         self.mpc_controller = RobotController(config_filename=config_filename)
 
         # get the robot config
         robot_config_filename = pkg_resources.resource_filename('modules', 'robot.yaml')
         config = yaml.load(open(robot_config_filename), Loader=yaml.FullLoader)
-        self.robot_config = RobotConfig(config)
+        self.robot_config = RobotConfig()
 
         # initialize the robot state
         self.state.robot_state = RobotState()
diff --git a/mpi_sim/components/occupany_grid_mapping.py b/mpi_sim/components/occupancy_grid_mapping.py
similarity index 93%
rename from mpi_sim/components/occupany_grid_mapping.py
rename to mpi_sim/components/occupancy_grid_mapping.py
index 3d18bf47cd73f9131ab82bf3f910faaabaa8d41d..11337d9742e74dc7cf226d581432a876ab3f739b 100644
--- a/mpi_sim/components/occupany_grid_mapping.py
+++ b/mpi_sim/components/occupancy_grid_mapping.py
@@ -2,7 +2,7 @@ import mpi_sim
 from mpi_sim.core.sensor_component import SensorComponent
 import numpy as np
 from mpi_sim.utils.box2d_utils import constraint_angle
-
+import cv2
 
 # TODO: if there are several agents that do mapping, use a common mapping process to generate the global map to save resources
 
@@ -75,6 +75,13 @@ class OccupancyGridMapping(SensorComponent):
         r"""The shape of the local map defined as (width, height)."""
         return self._local_map_shape
 
+    
+    def set_global_map(self, global_map):
+        self.state.global_map = mpi_sim.utils.OccupancyGridMap(
+                map=global_map,
+                area=self._global_map_area,
+                resolution=self._resolution
+            )
 
     def __init__(self, config=None, **kwargs):
         super().__init__(config=config, **kwargs)
@@ -107,6 +114,7 @@ class OccupancyGridMapping(SensorComponent):
             if isinstance(self.config.imported_global_map_path, str):
 
                 imported_global_map = np.load(file=self.config.imported_global_map_path)
+
                 self.state.global_map = mpi_sim.utils.OccupancyGridMap(
                     map=imported_global_map,
                     area=self._global_map_area,
@@ -151,7 +159,8 @@ class OccupancyGridMapping(SensorComponent):
                 transform=self.config.transform
             )
         else :
-            imported_global_map = np.load(file=self.config.imported_global_map_path)  
+            imported_global_map = np.load(file=self.config.imported_global_map_path)
+            # Uncomment to debug: draw a circular obstacle into the imported map
+            # imported_global_map = cv2.circle(imported_global_map*255, center=[75, 65], radius=7, color=(255, 255, 255), thickness=-1)/255
             if self.config.transform:
                imported_global_map=self.config.transform(imported_global_map)  
             self.state.global_map = mpi_sim.utils.OccupancyGridMap(
@@ -180,7 +189,7 @@ class OccupancyGridMapping(SensorComponent):
             self.state.local_map = mpi_sim.utils.create_local_perspective_occupancy_grid_map(
                 global_map=self.state.global_map,
                 position=self.object.position,
-                orientation=constraint_angle(self.object.orientation),
+                orientation=constraint_angle(np.pi/2+self.object.orientation),
                 depth=self.config.depth,
                 from_given_center = self.config.imported_global_map_px_coord_center
             )
diff --git a/mpi_sim/components/social_mapping.py b/mpi_sim/components/social_mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8cdf697f1d86efaeb18a70614c1146743e8eebb
--- /dev/null
+++ b/mpi_sim/components/social_mapping.py
@@ -0,0 +1,158 @@
+import mpi_sim
+from mpi_sim.core.sensor_component import SensorComponent
+import numpy as np
+from mpi_sim.utils.box2d_utils import constraint_angle
+
+
+import cv2
+
+# TODO: if there are several agents that do mapping, use a common mapping process to generate the global map to save resources
+
+
+class SocialMapping(SensorComponent):
+    r"""Component that creates a social map centered and oriented around its object, for example a robot agent."""
+
+    @staticmethod
+    def default_config():
+        dc = SensorComponent.default_config()
+        dc.name = 'social_mapping'
+        dc.area = None  # area of the global map ((x_min, x_max), (y_min, y_max)); if None, simulation.visible_area is used
+        dc.depth = 5.0  # depth of the local map in meters
+        dc.object_filter = None  # filter for the elements that should be mapped
+        dc.resolution = 0.05  # resolution of the maps in m per cell
+        dc.update_global_map_automatically = True  # should the global map be automatically updated each step
+        dc.update_local_map_automatically = False  # should the local map be updated each time the simulation is stepped
+        return dc
+
+
+    @property
+    def resolution(self):
+        r"""Get the tesolution of the maps in m per cell."""
+        return self._resolution
+
+
+    @property
+    def global_map(self):
+        r"""Get the global social map from which the local map is generated."""
+        if self.state.global_map is None:
+            return None
+        else:
+            return self.update_global_map()
+
+
+    @property
+    def global_map_area(self):
+        r"""The area that is spanned by the global map defined as ((x_min, x_max), (y_min, y_max))"""
+        return self._global_map_area
+
+
+    @property
+    def global_map_shape(self):
+        r"""The shape of the global map defined as (width, height)."""
+        return self._global_map_shape
+
+
+    @property
+    def local_map(self):
+        r"""Get the local social map that is centered and oriented around the object of the component."""
+        if self.state.local_map is None:
+            return None
+        else:
+            return self.update_local_map()
+
+
+    @property
+    def local_map_area(self):
+        r"""The area that is spanned by the local map defined as ((x_min, x_max), (y_min, y_max)) in reference frame of the local map."""
+        return self._local_map_area
+
+
+    @property
+    def local_map_shape(self):
+        r"""The shape of the local map defined as (width, height)."""
+        return self._local_map_shape
+
+    
+    def set_global_map(self, global_map):
+        self.state.global_map = mpi_sim.utils.SocialMap(
+                map=global_map,
+                area=self._global_map_area,
+                resolution=self._resolution
+            )
+
+    def __init__(self, config=None, **kwargs):
+        super().__init__(config=config, **kwargs)
+
+        self._resolution = self.config.resolution
+        self._global_map_area = self.config.area
+
+        self._global_map_shape = None
+        self.state.global_map = None
+
+        # is the global map initially created, if not do it before creating the local map
+        self.state.is_global_map_initialized = False
+
+        # initialize the global map if possible
+        if self._global_map_area is not None:
+            self._global_map_shape = mpi_sim.utils.get_social_map_shape(area=self._global_map_area, resolution=self._resolution)
+            self.state.global_map = mpi_sim.utils.SocialMap(
+                map=np.zeros(self._global_map_shape),
+                area=self._global_map_area,
+                resolution=self._resolution
+            )
+
+        self._local_map_area = ((-self.config.depth, self.config.depth), (-self.config.depth, self.config.depth))
+        local_map_length = int((self.config.depth / self._resolution) * 2)
+        self._local_map_shape = (local_map_length, local_map_length)
+        self.state.local_map = mpi_sim.utils.SocialMap(
+            map=np.zeros(self._local_map_shape),
+            area=None,
+            resolution=self._resolution
+        )
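+        # Note (derived from the defaults above): with depth = 5.0 m and
+        # resolution = 0.05 m/cell, local_map_length = int((5.0 / 0.05) * 2) = 200,
+        # so the local map is a (200, 200) array.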
+
+
+    def _add_to_simulation(self):
+
+        # if the global map area should span the whole simulation (config.area=None), then read visible_area from the simulation when added to it
+        if self._global_map_area is None:
+            self._global_map_area = self.simulation.visible_area
+            self._global_map_shape = mpi_sim.utils.get_social_map_shape(area=self._global_map_area, resolution=self._resolution)
+            self.state.global_map = mpi_sim.utils.SocialMap(
+                map=np.zeros(self._global_map_shape),
+                area=self._global_map_area,
+                resolution=self._resolution
+            )
+
+
+    def update_global_map(self):
+        self.state.global_map = mpi_sim.utils.create_social_map(
+            self.simulation,
+            area=self._global_map_area,
+            resolution=self._resolution
+        )
+
+        return self.state.global_map.map
+
+
+    def update_local_map(self):
+        if self.state.global_map is None:
+            self.update_global_map()
+        self.state.local_map = mpi_sim.utils.create_local_perspective_social_map(
+            global_map=self.state.global_map,
+            position=self.object.position,
+            orientation=constraint_angle(self.object.orientation),
+            depth=self.config.depth,
+        )
+
+        return self.state.local_map.map
+
+
+    def _step(self):
+        if self.config.update_global_map_automatically or not self.state.is_global_map_initialized:
+            self.update_global_map()
+            self.state.is_global_map_initialized = True
+        if self.config.update_local_map_automatically:
+            self.update_local_map()
diff --git a/mpi_sim/utils/__init__.py b/mpi_sim/utils/__init__.py
index 7e7acdd70e3414a480b37257d8e12b5fbcc6d6da..250982463fd5dcabfde2d63ba8257a5738736e26 100644
--- a/mpi_sim/utils/__init__.py
+++ b/mpi_sim/utils/__init__.py
@@ -40,3 +40,13 @@ from mpi_sim.utils.occupancy_grid_map import transform_position_to_map_coordinat
 from mpi_sim.utils.occupancy_grid_map import transform_map_coordinate_to_position
 from mpi_sim.utils.occupancy_grid_map import OccupancyGridMap
 from mpi_sim.utils.occupancy_grid_transform import RTABMAPlikeTransform
+
+# social map
+# (note: the two transform_* imports below shadow the occupancy_grid_map
+# versions of the same names imported above)
+from mpi_sim.utils.social_map import create_social_map
+from mpi_sim.utils.social_map import create_local_perspective_social_map
+from mpi_sim.utils.social_map import get_social_map_shape
+from mpi_sim.utils.social_map import transform_position_to_map_coordinate
+from mpi_sim.utils.social_map import transform_map_coordinate_to_position
+from mpi_sim.utils.social_map import SocialMap
diff --git a/mpi_sim/utils/social_map.py b/mpi_sim/utils/social_map.py
new file mode 100644
index 0000000000000000000000000000000000000000..b63427a1a92e3af5d596a88f420e7614d0801366
--- /dev/null
+++ b/mpi_sim/utils/social_map.py
@@ -0,0 +1,219 @@
+import mpi_sim
+import numpy as np
+import cv2
+from collections import namedtuple
+import mpi_sim.utils.truong_2018 as ssn
+
+
+SocialMap = namedtuple('SocialMap', ['map', 'area', 'resolution'])
+
+def get_social_map_shape(area, resolution):
+    r"""Calculates the size of the social map that can be created via create_social_map().
+
+    Args:
+        area (tuple of floats): Area of the simulation that should be mapped in ((x_min, x_max), (y_min, y_max)).
+        resolution (float): Resolution in meter per cell.
+
+    Returns:
+        Shape of the map as a tuple: (width, height).
+    """
+
+    x_width = area[0][1] - area[0][0]
+    y_width = area[1][1] - area[1][0]
+
+    width = int(x_width / resolution)
+    height = int(y_width / resolution)
+    shape = (width, height)
+
+    return shape
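+
+# Example with illustrative values: a 20 m x 10 m area at 0.05 m/cell
+# yields a (400, 200) map:
+#   get_social_map_shape(area=((-10.0, 10.0), (-5.0, 5.0)), resolution=0.05)
+#   # -> (400, 200)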
+
+def get_social_map_resolution(area, shape):
+    r"""Calculates the resolution of the social map that can be created via create_social_map().
+
+    Args:
+        area (tuple of floats): Area of the simulation that should be mapped in ((x_min, x_max), (y_min, y_max)).
+        shape (tuple): Shape of the map as a tuple: (width, height)
+
+    Returns:
+        Resolution in meter per cell. (float)
+    """
+
+    x_width = area[0][1] - area[0][0]
+
+    (width, height) = shape
+
+    # note: an int() cast here would truncate sub-meter resolutions (e.g. 0.05)
+    # to 0, so keep the division as a float; square cells are assumed, so the
+    # y axis would give the same value
+    resolution = x_width / width
+
+    return resolution
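+
+# Inverse of the example above: the same area at shape (400, 200) gives
+# back the 0.05 m/cell resolution:
+#   get_social_map_resolution(((-10.0, 10.0), (-5.0, 5.0)), (400, 200))  # -> 0.05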
+
+
+def transform_position_to_map_coordinate(position, map_area, map_resolution):
+    r"""Transform a position (x, y) in the simulation to a coordinate (x, y) of a map of the simulation.
+
+    Args:
+        position (x,y): x,y - Position that should be transformed.
+        map_area (tuple): Area ((x_min, x_max), (y_min, y_max)) of the simulation that the map is depicting.
+        map_resolution (float): Resolution of the map in m per cell.
+
+    Returns: Map coordinate as a tuple (x, y).
+    """
+    x_min = map_area[0][0]
+    y_min = map_area[1][0]
+    x = int((position[0] - x_min) / map_resolution)
+    y = int((position[1] - y_min) / map_resolution)
+    return x, y
+
+
+def transform_map_coordinate_to_position(map_coordinate, map_area, map_resolution):
+    r"""Transform a map coordinate (x, y) of a map to a position (x, y) in the simulation.
+
+    Args:
+        map_coordinate (x,y): x,y - Map coordinate that should be transformed.
+        map_area (tuple): Area ((x_min, x_max), (y_min, y_max)) of the simulation that the map is depicting.
+        map_resolution (float): Resolution of the map in m per cell.
+
+    Returns: Position as a tuple (x, y).
+    """
+    x_min = map_area[0][0]
+    y_min = map_area[1][0]
+    x = map_coordinate[0] * map_resolution + x_min
+    y = map_coordinate[1] * map_resolution + y_min
+    return x, y
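+
+# Round-trip example with illustrative values: for map_area ((-10, 10), (-5, 5))
+# and map_resolution 0.05, the position (0.0, 0.0) maps to cell (200, 100),
+# and that cell maps back to the position (0.0, 0.0):
+#   transform_position_to_map_coordinate((0.0, 0.0), ((-10, 10), (-5, 5)), 0.05)  # -> (200, 100)
+#   transform_map_coordinate_to_position((200, 100), ((-10, 10), (-5, 5)), 0.05)  # -> (0.0, 0.0)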
+
+
+def create_social_map(simulation, area=None, resolution=0.05):
+    r"""Creates an social map.
+
+    The size of the map depends on the visible_map_area of the simulation and the resolution of the map.
+    The map encodes the x-y position via map[x,y].
+
+    Args:
+        simulation (mpi_sim.Simulation): Simulation for which a map should be created.
+        area (tuple):   Area of the simulation that should be mapped in ((x_min, x_max), (y_min, y_max)). (Default: None)
+                        If None, it uses the simulation.visible_area.
+        resolution (float): Resolution in meter per cell (Default: 0.05).
+
+    Note:
+        Only mpi_sim.objects.Human objects are currently mapped; the object
+        filter is hard-coded inside the function.
+
+    Returns:
+        social map in form of a SocialMap (namedtuple with map, area, and resolution).
+    """
+
+    if area is None:
+        area = simulation.visible_area
+
+    # the size of the map depends on the world size and the resolution
+
+    # visible_area: ((min_x, max_x), (min_y, max_y))
+    x_width = area[0][1] - area[0][0]
+    y_width = area[1][1] - area[1][0]
+
+    width = x_width / resolution
+    height = y_width / resolution
+
+    object_filter = dict(types=mpi_sim.objects.Human)
+
+    bodies = simulation.box2d_simulation.get_bodies(
+        object_types=object_filter.get('types', None),
+        object_ids=object_filter.get('ids', None),
+        object_properties= object_filter.get('properties', None),
+        )
+
+    persons = []
+    for body in bodies:
+        x, y = np.array(body.position)
+        theta = body.angle
+        # TODO: study why the reported speed is zero
+        # (the constant offset of 2 below appears to be a workaround for that)
+        v = 2 + np.sqrt(np.array(body.linearVelocity)[0]**2 + np.array(body.linearVelocity)[1]**2)
+        persons.append((x, y, theta, v, False))
+
+
+    x_coordinates = np.linspace(area[0][1], area[0][0], int(width))
+    y_coordinates = np.linspace(area[1][1], area[1][0], int(height))
+    x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    
+    global_social_map = ssn.calc_eps(x_mesh, y_mesh, persons)
+
+    map = SocialMap(map=global_social_map, area=area, resolution=resolution)
+
+    return map
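+
+# Usage sketch (assumes `sim` is an already constructed mpi_sim.Simulation):
+#   smap = create_social_map(sim, resolution=0.05)
+#   smap.map         # 2D array of social-space values
+#   smap.area        # ((x_min, x_max), (y_min, y_max))
+#   smap.resolution  # 0.05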
+
+
+def create_local_perspective_social_map(global_map, depth=5.0, position=(0,0), orientation=0.0, from_given_center=None):
+    r"""Creates a local perspective of an social map.
+
+    Args:
+        global_map (SocialMap): Global social map which is the source for the local perspective.
+        depth (float): Viewing depth of the map in meters. The map size is 2 * depth. (Default: 5)
+        position (tuple of floats): Position (x,y) of the local map center in the global map. (Default: (0,0))
+        orientation (float): Orientation of the map in rad. (Default: 0)
+                             0 for north, -pi/2 for east, pi/2 for west, pi for south.
+        from_given_center (tuple): Optional pixel coordinate in the global map that is used as the local map center instead of transforming `position`. (Default: None)
+
+    Returns: Local perspective of the social map in form of a SocialMap (named tuple with map, area, and resolution)
+
+    """
+
+    # see https://theailearner.com/tag/cv2-warpperspective/
+    # Attention: opencv uses a different coordinate frame to us, thus the code additionally transforms the coordinate frames by
+    # having the opposite rotation direction, and using transpose operations
+
+    # TODO: there should be a way to avoid the transpose operations by choosing different source and destination points
+
+    local_map_length = int(depth * 2 / global_map.resolution)
+    
+    if from_given_center is None:
+        pos_in_map = transform_position_to_map_coordinate(
+            position,
+            map_area=global_map.area,
+            map_resolution=global_map.resolution
+        )
+    else:
+        center_x, center_y = from_given_center
+        # convert the position to pixels first (np.array so that plain tuples work)
+        position_px = np.array(position) / global_map.resolution
+        pos_in_map = np.array([center_x - position_px[1], center_y + position_px[0]])
+
+    orientation = mpi_sim.utils.constraint_angle(orientation)
+
+    # rotation for the cv operator is in the opposite direction
+    hvect = np.array([np.cos(-orientation), -np.sin(-orientation)])
+    vvect = np.array([np.sin(-orientation), np.cos(-orientation)])
+    # print('hvect', hvect)
+    # print('vvect', vvect)
+
+    # source points in pixel space
+    rect_ll = pos_in_map - 0.5 * hvect * local_map_length - 0.5 * vvect * local_map_length
+    rect_ul = pos_in_map - 0.5 * hvect * local_map_length + 0.5 * vvect * local_map_length
+    rect_ur = pos_in_map + 0.5 * hvect * local_map_length + 0.5 * vvect * local_map_length
+    rect_lr = pos_in_map + 0.5 * hvect * local_map_length - 0.5 * vvect * local_map_length
+    src_pts = np.vstack([rect_ll, rect_ul, rect_ur, rect_lr]).astype(np.float32)
+    # print('src_pts', src_pts)
+
+    # destination points in pixel space
+    dst_pts = np.array(
+        [[0, 0],
+         [0, local_map_length],
+         [local_map_length, local_map_length],
+         [local_map_length, 0]],
+        dtype=np.float32
+    )
+    # print('dst_pts', dst_pts)
+
+    m = cv2.getPerspectiveTransform(src_pts, dst_pts)
+    # use transpose operations to bring image into coordinate frame of cv and then back to our coordinate system
+    local_map = cv2.warpPerspective(
+        global_map.map.T,
+        m,
+        (local_map_length, local_map_length)
+    ).T
+
+    return SocialMap(map=local_map, resolution=global_map.resolution, area=None)
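+
+# Usage sketch (pose values are illustrative): extract a 5 m deep local view
+# around a robot at (1.0, 2.0) facing along +x; at 0.05 m/cell the result is
+# a (200, 200) SocialMap with area=None:
+#   local = create_local_perspective_social_map(smap, depth=5.0,
+#                                               position=(1.0, 2.0),
+#                                               orientation=0.0)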
diff --git a/mpi_sim/utils/truong_2018.py b/mpi_sim/utils/truong_2018.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb2860dbb927eb2d3f27704d65bc4784187f6ee0
--- /dev/null
+++ b/mpi_sim/utils/truong_2018.py
@@ -0,0 +1,853 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches
+from scipy import optimize
+from scipy import ndimage
+import scipy.interpolate
+import skfmm
+
+
+def calc_group_circle(persons):
+    persons_coordinates = [[person[0], person[1]] for person in persons]
+    x_g, y_g, r_g, _ = least_squares_circle(persons_coordinates)
+    return x_g, y_g, r_g
+
+
+def least_squares_circle(coords):
+    """
+    Circle fit using least-squares solver.
+    Inputs:
+
+        - coords, list or numpy array with len > 2 of the form:
+          [[x_coord, y_coord],
+           ...,
+           [x_coord, y_coord]]
+
+    Outputs:
+
+        - xc : x-coordinate of solution center (float)
+        - yc : y-coordinate of solution center (float)
+        - R : Radius of solution (float)
+        - residu : MSE of solution against training data (float)
+
+    Taken from package circle-fit.
+    """
+
+    def calc_R(x, y, xc, yc):
+        """
+        calculate the distance of each 2D points from the center (xc, yc)
+        """
+        return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
+
+    def f(c, x, y):
+        """
+        calculate the algebraic distance between the data points
+        and the mean circle centered at c=(xc, yc)
+        """
+        Ri = calc_R(x, y, *c)
+        return Ri - Ri.mean()
+
+    # coordinates of the barycenter
+    x = np.array([x[0] for x in coords])
+    y = np.array([x[1] for x in coords])
+    x_m = np.mean(x)
+    y_m = np.mean(y)
+    center_estimate = x_m, y_m
+    center, ier = optimize.leastsq(f, center_estimate, args=(x,y))
+    xc, yc = center
+    Ri = calc_R(x, y, *center)
+    R = Ri.mean()
+    residu = np.sum((Ri - R)**2)
+    return xc, yc, R, residu
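+
+# Sanity-check example: four points on the unit circle fit the unit circle,
+#   least_squares_circle([[1, 0], [0, 1], [-1, 0], [0, -1]])
+# returns approximately (0.0, 0.0, 1.0, 0.0).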
+
+
+def calc_default_hand_positions(person, **parameters):
+    '''Calculates the default positions of a person's hands, so that they are at the person's sides.'''
+
+    default_hand_distance = parameters.get('default_hand_distance', 0.2)
+
+    x_p, y_p, theta_p, _, _ = person
+
+    # define position as if person has a 0 angle orientation at position [0,0]
+    pos_lh = np.array([[0,  default_hand_distance]]).transpose()
+    pos_rh = np.array([[0, -default_hand_distance]]).transpose()
+
+    # rotate the positions according
+    rotation_mat = np.array([[np.cos(theta_p), -np.sin(theta_p)],
+                             [np.sin(theta_p),  np.cos(theta_p)]])
+
+    pos_lh = np.matmul(rotation_mat, pos_lh).transpose()[0]
+    pos_rh = np.matmul(rotation_mat, pos_rh).transpose()[0]
+
+    # translate according to position of person
+    x_lh = pos_lh[0] + x_p
+    y_lh = pos_lh[1] + y_p
+    x_rh = pos_rh[0] + x_p
+    y_rh = pos_rh[1] + y_p
+
+    return x_lh, y_lh, x_rh, y_rh
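+
+# Example: a person at the origin facing along +x (theta_p = 0) gets the
+# default hands at (0.0, 0.2) (left) and (0.0, -0.2) (right):
+#   calc_default_hand_positions((0.0, 0.0, 0.0, 0.0, False))
+#   # -> (0.0, 0.2, 0.0, -0.2)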
+
+
+def _calc_single_eps(x, y, person, d_po=None, **parameters):
+
+    A_p = parameters.get('A_p', 1.0)
+    sigma_p_x = parameters.get('sigma_p_x', 0.45)
+    sigma_p_y = parameters.get('sigma_p_y', 0.45)
+    alpha = parameters.get('alpha', np.pi / 2)
+    fov_angle = parameters.get('fov_angle', 60 * np.pi / 180)
+    f_v = parameters.get('f_v', 0.8)
+    f_front = parameters.get('f_front', 0.2)
+    f_fov = parameters.get('f_fov', 0)
+    F_sit = parameters.get('F_sit', 2.5)
+
+    f_h = parameters.get('f_h', 0.0)
+    sigma_h_x = parameters.get('sigma_h_x', 0.28)
+    sigma_h_y = parameters.get('sigma_h_y', 0.28)
+
+    w_1 = parameters.get('w_1', 1.0)
+    w_2 = parameters.get('w_2', 1.0)
+
+    if len(person) == 5:
+        x_p, y_p, theta_p, v_p, is_sitting = person
+        x_lh, y_lh, x_rh, y_rh = calc_default_hand_positions(person)
+    else:
+        x_p, y_p, theta_p, v_p, is_sitting, x_lh, y_lh, x_rh, y_rh = person
+
+    f_sit = F_sit if is_sitting else 1.0
+
+    sigma_p_y_i = sigma_p_y
+
+    # personal space f_p
+    d = np.sqrt((x - x_p) ** 2 + (y - y_p) ** 2)
+
+    # Algorithm 1
+    sigma_p_x_i = np.full_like(x, sigma_p_x)
+
+    # angle according to the perspective of the person
+    theta = np.arctan2(y - y_p, x - x_p)
+    theta_diff = np.abs(np.arctan2(np.sin(theta - theta_p), np.cos(theta - theta_p)))
+
+    # check if an object needs to be considered
+    if d_po is None:
+        in_field_of_view_of_p_inds = theta_diff <= fov_angle
+        in_frontal_area_of_p_inds = (theta_diff <= alpha) & ~in_field_of_view_of_p_inds
+
+        sigma_p_x_i[in_field_of_view_of_p_inds] = (1 + v_p * f_v + f_front * f_sit + f_fov) * sigma_p_x
+        sigma_p_x_i[in_frontal_area_of_p_inds] = (1 + v_p * f_v + f_front  * f_sit) * sigma_p_x
+    else:
+        in_frontal_area_of_p_inds = (theta_diff <= alpha)
+
+        sigma_p_x_i[in_frontal_area_of_p_inds] = d_po / 2.0
+
+    f_p = A_p * np.exp(-(d * np.cos(theta_diff) / (np.sqrt(2) * sigma_p_x_i)) ** 2 -
+                        (d * np.sin(theta_diff) / (np.sqrt(2) * sigma_p_y_i)) ** 2)
+
+    # left hand space
+    d_lh = np.sqrt((x_p - x_lh) ** 2 + (y_p - y_lh) ** 2)
+    theta_lh = np.arctan2(y_lh - y_p, x_lh - x_p)
+    sigma_lh_x = (1 + f_h * d_lh) * sigma_h_x
+
+    f_lh = A_p * np.exp(-(d * np.cos(theta - theta_lh) / (np.sqrt(2) * sigma_lh_x)) ** 2 -
+                        (d * np.sin(theta - theta_lh) / (np.sqrt(2) * sigma_h_y)) ** 2)
+
+    # right hand space
+    d_rh = np.sqrt((x_p - x_rh) ** 2 + (y_p - y_rh) ** 2)
+    theta_rh = np.arctan2(y_rh - y_p, x_rh - x_p)
+    sigma_rh_x = (1 + f_h * d_rh) * sigma_h_x
+
+    f_rh = A_p * np.exp(-(d * np.cos(theta - theta_rh) / (np.sqrt(2) * sigma_rh_x)) ** 2 -
+                        (d * np.sin(theta - theta_rh) / (np.sqrt(2) * sigma_h_y)) ** 2)
+
+    # combine hand and person spaces
+    f_hands = np.maximum(f_lh, f_rh)
+    f_cur_eps = np.maximum(w_1 * f_p, w_2 * f_hands)
+
+    return f_cur_eps
+
+
+def calc_eps(x, y, persons, objects=None, **parameters):
+    '''Calculates the extended personal space of a human.'''
+
+    if not isinstance(persons, list): persons = [persons]
+    if objects is not None and not isinstance(objects, list): objects = [objects]
+
+    T_dis = parameters.get('T_dis', 3.6)
+    T_ang = parameters.get('T_ang', 10 * np.pi / 180)
+
+    f_eps = np.zeros_like(x)
+
+    for person in persons:
+        x_p, y_p, theta_p, v_p, is_sitting = person[0:5]
+
+        is_incorporated_object = False
+
+        if objects is not None:
+            for object in objects:
+
+                # check if the object should be considered
+                x_ob, y_ob = object
+                # check if object is in interaction with the person
+                d_po = np.sqrt((x_ob - x_p) ** 2 + (y_ob - y_p) ** 2)
+                theta_po = np.arctan2((y_ob - y_p), (x_ob - x_p))
+                theta_diff = np.abs(np.arctan2(np.sin(theta_po - theta_p), np.cos(theta_po - theta_p)))
+
+                # check if the object should be incorporated into eps
+                if d_po < T_dis and theta_diff < T_ang:
+                    cur_f_eps = _calc_single_eps(x, y, person, d_po=d_po, **parameters)
+                    f_eps = np.maximum(f_eps, cur_f_eps)
+
+                    is_incorporated_object = True
+
+        # if no object was incorporated into the eps, then calculate one without an object
+        if not is_incorporated_object:
+            cur_f_eps = _calc_single_eps(x, y, person, **parameters)
+            f_eps = np.maximum(f_eps, cur_f_eps)
+
+    return f_eps
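+
+# Usage sketch (mirrors the commented demos in __main__ below):
+#   x_mesh, y_mesh = np.meshgrid(np.linspace(0, 4, 400), np.linspace(0, 4, 400))
+#   person = (2.0, 2.0, -np.pi / 2, 0.0, False)  # (x, y, theta, v, is_sitting)
+#   f_eps = calc_eps(x_mesh, y_mesh, person)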
+
+
+def calc_sis(x, y, persons, theta_g=0.0, v_g= 0.0, **parameters):
+
+    if not isinstance(persons, list): persons = [persons]
+
+    if len(persons) == 1:
+        f_group = np.zeros_like(x)
+    else:
+        A_p = parameters.get('A_p', 1.0)
+        f_g = parameters.get('f_g', 0.0)
+        f_v = parameters.get('f_v', 0.8)
+
+        # use a simple circle approximation if the center and radius of the group are not given
+        x_g, y_g, r_g = calc_group_circle(persons)
+
+        sigma_g_x = (r_g / 2) * (1 + f_g)
+        sigma_g_y = (r_g / 2) * (1 + f_g)
+
+        # if the group moves, then differentiate between the area in the movement direction and the area opposite to the movement direction
+        if v_g > 0.0:
+            sigma_g_x = np.full_like(x, sigma_g_x)
+
+            theta = np.arctan2(y - y_g, x - x_g)
+            theta_diff = np.abs(np.arctan2(np.sin(theta - theta_g), np.cos(theta - theta_g)))
+
+            sigma_g_x[theta_diff < np.pi/2] = (r_g / 2) * (1 + f_g + f_v * v_g)
+
+        d = np.sqrt((x - x_g) ** 2 + (y - y_g) ** 2)
+        theta = np.arctan2(y - y_g, x - x_g)
+
+        f_group = A_p * np.exp(-(d * np.cos(theta - theta_g) / (np.sqrt(2) * sigma_g_x)) ** 2 -
+                                (d * np.sin(theta - theta_g) / (np.sqrt(2) * sigma_g_y)) ** 2)
+
+    return f_group
+
+
+def calc_dsz(x, y, persons, groups=None, objects=None, **parameters):
+
+    if groups is not None and not isinstance(groups, list): groups = [groups]
+
+    w_3 = parameters.get('w_3', 1.0)
+    w_4 = parameters.get('w_4', 1.0)
+
+    # calc eps
+    f_eps = calc_eps(x, y, persons, objects, **parameters)
+
+    # calc sis for all groups
+    f_sis = np.zeros_like(x)
+    if groups is not None:
+        for group in groups:
+
+            # get persons of the group and the direction and velocity of the group
+            person_idxs, theta_g, v_g = group
+            cur_persons = [persons[p_idx] for p_idx in person_idxs]
+
+            cur_f_sis = calc_sis(x, y, cur_persons, theta_g, v_g, **parameters)
+            f_sis = np.maximum(f_sis, cur_f_sis)
+
+    # combine both
+    f_dsz = np.maximum(w_3 * f_eps, w_4 * f_sis)
+
+    return f_dsz
+
+
+def calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, persons, **parameters):
+
+    if not isinstance(persons, list): persons = [persons]
+
+    fov_angle = parameters.get('fov_angle', 60 * np.pi / 180)
+    r_step = parameters.get('r_step', 0.1)
+    r_p_0 = parameters.get('r_p_0', 0.1)
+    dsz_border = parameters.get('dsz_border', 0.03)
+
+    # helper function to define the points on a circle
+    def calc_circle_points(x_mesh, y_mesh, x_c, y_c, r_c):
+        return (np.abs(np.hypot(x_c - x_mesh, y_c - y_mesh) - r_c) < 0.04)
+
+    if len(persons) == 1:
+        # single person
+        x_c, y_c = persons[0][0], persons[0][1]
+        r_c = r_p_0
+    else:
+        # group of people
+        x_c, y_c, r_c = calc_group_circle(persons)
+
+    A_out = np.full_like(x_mesh, False, dtype=bool)
+
+    while True:
+        r_c += r_step
+        A_in = calc_circle_points(x_mesh, y_mesh, x_c, y_c, r_c)
+
+        A_in_1 = A_in.copy()
+        A_out_1 = np.full_like(x_mesh, False, dtype=bool)
+
+        # check for circle points that are within all persons viewing point
+        for person in persons:
+
+            x_p, y_p, theta_p = person[0], person[1], person[2]
+
+            # calculate the angle of each point on the circle in relation to the person
+            theta = np.arctan2(y_mesh[A_in_1] - y_p, x_mesh[A_in_1] - x_p)
+            # angle according to the perspective of the person
+            theta_diff = np.abs(np.arctan2(np.sin(theta - theta_p), np.cos(theta - theta_p)))
+
+            # check if circle points are in the viewing angle of the person
+            A_out_1[A_in_1] = (theta_diff <= fov_angle)
+
+            # if there is no point that is viewed stop
+            if not np.any(A_out_1):
+                break
+            else:
+                # only check further points that are viewed by all people
+                A_in_1 = A_out_1.copy()
+
+        if np.any(A_out_1):
+            # check if the points are outside the DMZ
+            points_outside_DSZ_inds = f_dsz[A_out_1] <= dsz_border
+            A_out[A_out_1] = points_outside_DSZ_inds
+        else:
+            # there are no points on the circle that can be viewed by all people
+            # --> just take points into account that are inside the DSZ
+            points_outside_DSZ_inds = f_dsz[A_in] <= dsz_border
+            A_out[A_in] = points_outside_DSZ_inds
+
+        if np.any(A_out):
+            break
+
+    A_out, goal = _filter_approaching_areas(x_mesh, y_mesh, A_out, robot, (x_c, y_c))
+
+    return goal, A_out
+
+
+def _filter_approaching_areas(x_mesh, y_mesh, A_out, robot, target):
+
+    A_out = A_out.astype(float)  # np.float is removed in recent NumPy; use the builtin float
+
+    x_r, y_r = robot[0], robot[1]
+    x_t, y_t = target[0], target[1]
+
+    # identify connected areas in A_out
+    labeled_A_out, n_areas = ndimage.label(A_out, structure=np.ones((3, 3), dtype=int))
+
+    min_d_r = np.inf
+    x_q, y_q = np.nan, np.nan
+
+    # label 0 is the background; iterate over the connected areas 1..n_areas
+    for area_id in range(1, n_areas+1):
+
+        label_inds = (labeled_A_out == area_id)
+
+        # center point of area (true center of mass)
+        center_x, center_y = ndimage.center_of_mass(label_inds)
+
+        # identify the point in the 2d matrix that is the closest to the true center
+        grid_x, grid_y = np.indices(A_out.shape).astype(float)
+        d = np.sqrt((grid_x - center_x) ** 2 + (grid_y - center_y) ** 2)
+
+        # replace A_out with the distance
+        A_out[label_inds] = d[label_inds]
+
+        # find coordinates with the shortest distance
+        min_d_grid_idx = np.where(d == np.min(d))
+        x_a_idx = min_d_grid_idx[0][0]
+        y_a_idx = min_d_grid_idx[1][0]
+
+        x_a = x_mesh[x_a_idx, y_a_idx]
+        y_a = y_mesh[x_a_idx, y_a_idx]
+
+        # distance to the robot
+        d_r = np.sqrt((x_a - x_r) ** 2 + (y_a - y_r) ** 2)
+
+        if d_r < min_d_r:
+            min_d_r = d_r
+            x_q, y_q = x_a, y_a
+
+    # calculate the goal orientation
+    theta_q = np.arctan2(y_t - y_q, x_t - x_q)
+
+    goal = (x_q, y_q, theta_q)
+
+    return A_out, goal
+
+
+def calc_path(x_mesh, y_mesh, velocity_map, robot, goal, **parameters):
+
+    max_steps = parameters.get('max_steps', 100)
+    position_step_width = parameters.get('position_step_width', 0.1) # in meter
+    rotation_step_width = parameters.get('rotation_step_width', np.pi*0.1)  # in rad
+    min_goal_position_dist = parameters.get('min_goal_position_dist', 0.05) # in meter
+    min_goal_rotation_dist = parameters.get('min_goal_rotation_dist', 0.05)  # in rad
+
+    x_goal, y_goal, theta_goal = goal
+    x_robot, y_robot = robot[0], robot[1]
+
+    # TODO: also update the rotation of the robot
+    theta_robot = theta_goal
+
+    # compute the shortest travel time map based on the velocity map
+    # TODO: if the goal location is not reachable, then select the most nearby point that is reachable
+
+    # set phi to -1 at the grid point that is closest to the goal location,
+    # and to 1 everywhere else
+    distance_to_goal = np.sqrt((x_mesh - x_goal) ** 2 + (y_mesh - y_goal) ** 2)
+    phi = np.ones_like(x_mesh)
+    phi[np.unravel_index(np.argmin(distance_to_goal), phi.shape)] = -1
+
+    travel_time_map = skfmm.travel_time(phi, velocity_map, order=1)
+
+    # calc gradient over the travel_time map and create interpolation functions
+    grad_t_y, grad_t_x = np.gradient(travel_time_map)
+
+    # interpolate the gradients and velocities
+    gradx_interp = scipy.interpolate.RectBivariateSpline(x_mesh[0, :], y_mesh[:, 0], grad_t_x)
+    grady_interp = scipy.interpolate.RectBivariateSpline(x_mesh[0, :], y_mesh[:, 0], grad_t_y)
+    velocity_interp = scipy.interpolate.RectBivariateSpline(x_mesh[0, :], y_mesh[:, 0], velocity_map)
+
+    path = []
+    path.append((x_robot, y_robot, theta_robot))
+
+    # create path by following the gradient of the travel time
+    for step in range(max_steps):
+
+        position_goal_distance = np.sqrt((x_robot - x_goal) ** 2 + (y_robot - y_goal) ** 2)
+        rotation_goal_distance = np.abs(np.arctan2(np.sin(theta_robot - theta_goal), np.cos(theta_robot - theta_goal)))
+
+        if position_goal_distance <= min_goal_position_dist and rotation_goal_distance <= min_goal_rotation_dist:
+            break
+
+        movement_direction = np.array([-gradx_interp(y_robot, x_robot)[0][0],
+                                       -grady_interp(y_robot, x_robot)[0][0]])
+
+        movement_speed = velocity_interp(y_robot, x_robot)[0][0]
+
+        # create normalized movement direction vector and change its size according to the movement speed
+        f_position = movement_direction / np.linalg.norm(movement_direction) * movement_speed
+
+        # update the position of the robot
+        x_robot += f_position[0] * position_step_width
+        y_robot += f_position[1] * position_step_width
+
+        f_orientation = 0.0
+        # TODO: adapt the rotation of the robot
+        path.append((x_robot, y_robot, theta_robot))
+
+    return path
+
+
+def occupancy_map_to_velocity_map(x_mesh, y_mesh, occupancy_map, **parameters):
+    '''Create a velocity map for a given occupancy map.'''
+
+    sigma = parameters.get('sigma', 0.15)
+
+    d_x = [x_mesh[0, 1] - x_mesh[0, 0], y_mesh[1, 0] - y_mesh[0, 0]]
+
+    # use fast marching method to compute the min distance from free to occupied areas
+    phi = np.ones_like(occupancy_map, dtype=int)
+    phi[occupancy_map == 1] = -1
+
+    distance_map = skfmm.distance(phi, d_x, order=1)
+
+    # compute velocity based on distance
+    velocity_map = 1.0 - np.exp(-distance_map/(np.sqrt(2)*sigma))
+    velocity_map[occupancy_map == 1] = 0
+
+    return velocity_map
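+
+# The exponential ramp means the velocity rises smoothly with clearance from
+# obstacles: with the default sigma = 0.15, a point at distance
+# sqrt(2) * 0.15 ~ 0.21 m from the nearest obstacle gets
+# 1 - exp(-1) ~ 0.63 of the maximum speed.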
+
+
+def get_default_occupancy_map(x_mesh, y_mesh):
+
+    occupancy_map = np.full_like(x_mesh, False, dtype=bool)
+
+    # borders
+    occupancy_map[0, :] = True
+    occupancy_map[-1, :] = True
+    occupancy_map[:, 0] = True
+    occupancy_map[:, -1] = True
+
+    return occupancy_map
+
+
+def add_box_to_occupancy_map(x_mesh, y_mesh, occupancy_map, box):
+
+    occupancy_map = np.copy(occupancy_map)
+
+    x_box, y_box, width, height = box
+
+    # identify the start and end indices of the box in x and y
+    x_start = x_box
+    x_end = x_start + width
+    y_start = y_box
+    y_end = y_box + height
+
+    d_1_start_index = np.argmin(np.abs(x_mesh[0, :] - x_start))
+    d_1_end_index = np.argmin(np.abs(x_mesh[0, :] - x_end))
+
+    d_0_start_index = np.argmin(np.abs(y_mesh[:, 0] - y_start))
+    d_0_end_index = np.argmin(np.abs(y_mesh[:, 0] - y_end))
+
+    occupancy_map[d_0_start_index:d_0_end_index, d_1_start_index:d_1_end_index] = True
+
+    return occupancy_map
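+
+# Example (matches the demo in __main__ below): mark a horizontal wall of
+# height 0.2 m starting at (1.0, 1.0); an oversized width simply clamps to
+# the right border of the grid:
+#   occupancy_map = add_box_to_occupancy_map(x_mesh, y_mesh, occupancy_map,
+#                                            (1.0, 1, 300, 0.2))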
+
+
+def plot_2d(x_mesh, y_mesh, space_model, occupancy_map=None, approaching_areas=None, persons=None, objects=None, goal=None, robot=None, path=None, title='', is_contour_lines=False):
+
+    if persons is None:
+        persons = []
+    elif not isinstance(persons, list):
+        persons = [persons]
+
+    if objects is None:
+        objects = []
+    elif not isinstance(objects, list):
+        objects = [objects]
+
+    origin = 'lower'
+
+    fig, ax = plt.subplots(constrained_layout=True)
+
+    CS = ax.contourf(x_mesh, y_mesh, space_model, 20, cmap=plt.cm.binary, origin=origin)
+
+    # Make a colorbar for the ContourSet returned by the contourf call.
+    cbar = fig.colorbar(CS)
+    cbar.ax.set_ylabel('space model value')
+    # Add the contour line levels to the colorbar
+    if is_contour_lines:
+        # Note that in the following, we explicitly pass in a subset of
+        # the contour levels used for the filled contours.  Alternatively,
+        # we could pass in additional levels to provide extra resolution,
+        # or leave out the levels kwarg to use all of the original levels.
+        CS2 = ax.contour(CS, levels=CS.levels[::2], colors='r', origin=origin)
+        cbar.add_lines(CS2)
+
+    if approaching_areas is not None:
+        inds = approaching_areas > 0
+        ax.scatter(x_mesh[inds], y_mesh[inds], s=1, c=-approaching_areas[inds])
+
+    if robot is not None:
+        ax.scatter(robot[0], robot[1], s=150, c='g')
+
+    if goal is not None:
+        # print goal position and orientation
+        ax.scatter(goal[0], goal[1], c='r')
+        plt.arrow(goal[0], goal[1], np.cos(goal[2]) * 0.1, np.sin(goal[2]) * 0.1, edgecolor='r')
+
+    # draw persons
+    for person in persons:
+        if len(person) == 5:
+            x_p, y_p, theta_p, v_p, is_sitting = person
+            x_lh, y_lh, x_rh, y_rh = calc_default_hand_positions(person)
+        else:
+            x_p, y_p, theta_p, v_p, is_sitting, x_lh, y_lh, x_rh, y_rh = person
+
+        # person
+        e_p = matplotlib.patches.Ellipse((x_p, y_p), 0.15, 0.25,
+                                         angle=theta_p*180/np.pi, linewidth=2, fill=False, zorder=2, edgecolor='b')
+        ax.add_patch(e_p)
+
+        # left hand
+        e_lh = matplotlib.patches.Ellipse((x_lh, y_lh), 0.05, 0.05, linewidth=1, fill=False, zorder=2, edgecolor='b')
+        ax.add_patch(e_lh)
+
+        # right hand
+        e_rh = matplotlib.patches.Ellipse((x_rh, y_rh), 0.05, 0.05, linewidth=1, fill=False, zorder=2, edgecolor='b')
+        ax.add_patch(e_rh)
+
+        plt.arrow(x_p, y_p, np.cos(theta_p) * 0.1, np.sin(theta_p) * 0.1, edgecolor='b')
+
+    # draw objects
+    for object in objects:
+        x_ob, y_ob = object
+        e = matplotlib.patches.Rectangle((x_ob-0.1, y_ob-0.1), 0.2, 0.2, edgecolor='r')
+        ax.add_patch(e)
+
+    if occupancy_map is not None:
+        ax.scatter(x_mesh[occupancy_map == 1], y_mesh[occupancy_map == 1], c='k')
+
+    if path is not None:
+        path_position = np.array([[point[0], point[1]] for point in path])
+        ax.plot(path_position[:,0], path_position[:,1])
+
+    ax.set_title(title)
+    ax.set_xlabel('x')
+    ax.set_ylabel('y')
+
+    plt.show()
+
+
+if __name__ == '__main__':
+
+    # TODO: the current group identification algorithm does not take into account the orientation of people
+
+    # ###############################
+    # # EPS
+    #
+    # x_coordinates = np.linspace(0, 4, 400)
+    # y_coordinates = np.linspace(0, 4, 400)
+    # x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    #
+    # # FIGURE 2. b
+    # person = (2.0, 2.0, -np.pi/2, 0.0, False) # (x, y, theta, v, is_sitting)
+    # f_eps = calc_eps(x_mesh, y_mesh, person, f_fov=0.4, f_front=0.0)
+    # plot_2d(x_mesh, y_mesh, f_eps, persons=person, title='FOV based EPS (Fig. 2, b)')
+    #
+    # # FIGURE 2. c
+    # person = (2.0, 2.0, -np.pi/2, 0.0, False) # (x, y, theta, v, is_sitting)
+    # f_eps = calc_eps(x_mesh, y_mesh, person)
+    # plot_2d(x_mesh, y_mesh, f_eps, persons=person, title='Human frontal area-based EPS (Fig. 2, c)')
+    #
+    # # FIGURE 2. d
+    # person = (2.0, 2.0, -np.pi/2, 0.0, True) # (x, y, theta, v, is_sitting)
+    # #person = (2.0, 2.0, 0, 0.0, True) # (x, y, theta, v, is_sitting)
+    # f_eps = calc_eps(x_mesh, y_mesh, person)
+    # plot_2d(x_mesh, y_mesh, f_eps, persons=person, title='EPS of sitting person (Fig. 2, d)')
+    #
+    # # FIGURE 2. e
+    # person = (2.0, 2.0, -np.pi/2, 0.6, False) # (x, y, theta, v, is_sitting)
+    # f_eps = calc_eps(x_mesh, y_mesh, person)
+    # plot_2d(x_mesh, y_mesh, f_eps, persons=person, title='EPS of moving person (Fig. 2, e)')
+    #
+    # # FIGURE 2. f
+    # person = (2.0, 2.0, -np.pi/2, 0, False) # (x, y, theta, v, is_sitting)
+    # object = (2.0, 0.5)
+    # f_eps = calc_eps(x_mesh, y_mesh, person, object)
+    # plot_2d(x_mesh, y_mesh, f_eps, persons=person, objects=object, title='EPS of human with object (Fig. 2, f)')
+
+
+    # ###############################
+    # # SIS
+    #
+    # x_coordinates = np.linspace(0, 4, 400)
+    # y_coordinates = np.linspace(0, 4, 400)
+    # x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    #
+    # # # FIGURE 3. a
+    # persons = [(1.0, 2.0, 0.0, 0, False),  # (x, y, theta, v, is_sitting)
+    #            (3.0, 2.0, np.pi, 0, False)]
+    # theta_g, v_g = 0.0, 0.0
+    # f_sis = calc_sis(x_mesh, y_mesh, persons, theta_g=theta_g, v_g=v_g)
+    # plot_2d(x_mesh, y_mesh, f_sis, persons=persons, title='SIS vis-a-vis (Fig. 3, a)')
+    #
+    # # FIGURE 3. b
+    # persons = [(1.5, 1.5, np.pi*0.4, 0, False),  # (x, y, theta, v, is_sitting)
+    #            (2.5, 1.5, np.pi*0.6, 0, False)]
+    # theta_g, v_g = 0.0, 0.0
+    # f_sis = calc_sis(x_mesh, y_mesh, persons, theta_g=theta_g, v_g=v_g)
+    # plot_2d(x_mesh, y_mesh, f_sis, persons=persons, title='SIS v-shape (Fig. 3, b)')
+    #
+    # # FIGURE 3. c
+    # persons = [(1.5, 1.0, np.pi*0.4, 0, False),  # (x, y, theta, v, is_sitting)
+    #            (2.5, 1.0, np.pi*0.6, 0, False),
+    #            (2.0, 3.0, np.pi*-0.5, 0, False)]
+    # theta_g, v_g = 0.0, 0.0
+    # f_sis = calc_sis(x_mesh, y_mesh, persons, theta_g=theta_g, v_g=v_g)
+    # plot_2d(x_mesh, y_mesh, f_sis, persons=persons, title='SIS close group (Fig. 3, c)')
+    #
+    # # FIGURE 3. d
+    # persons = [(1.75, 1.3, np.pi*0.45, 0.5, False),  # (x, y, theta, v, is_sitting)
+    #            (2.25, 1.3, np.pi*0.55, 0.5, False)]
+    # theta_g, v_g = np.pi*0.5, 0.5
+    # f_sis = calc_sis(x_mesh, y_mesh, persons, theta_g=theta_g, v_g=v_g)
+    # plot_2d(x_mesh, y_mesh, f_sis, persons=persons, title='SIS - Two people moving in abreast-shape (Fig. 3, d)')
+    #
+    # # FIGURE 3. e
+    # persons = [(1.5, 1.8, np.pi*0.45, 0, False),  # (x, y, theta, v, is_sitting)
+    #            (2.0, 1.2, np.pi*0.5, 0, False),
+    #            (2.5, 1.8, np.pi*0.55, 0, False)]
+    # theta_g, v_g = np.pi * 0.5, 0.5
+    # f_sis = calc_sis(x_mesh, y_mesh, persons, theta_g=theta_g, v_g=v_g)
+    # plot_2d(x_mesh, y_mesh, f_sis, persons=persons, title='SIS - Three people moving in v-shape (Fig. 3, e)')
+
+
+    # ###############################
+    # # DSZ
+    #
+    # x_coordinates = np.linspace(0, 6, 700)
+    # y_coordinates = np.linspace(0, 6, 700)
+    # x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    #
+    # # Fig 4 (a)
+    # persons = [(2.5, 1.8, np.pi * 0.45, 0.0, False),
+    #            (3.5, 1.8, np.pi * 0.55, 0.0, False)]
+    # object = (3.0, 5.0)
+    # group = ([0, 1], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group, objects=object)
+    # plot_2d(x_mesh, y_mesh, f_dsz, persons=persons, objects=object, title='DSZ (Fig. 4, a)')
+    #
+    # # Fig 4 (b)
+    # persons = [(2.5, 1.9, np.pi * 0.25, 0.0, False),
+    #            (3.0, 3.8, np.pi * -0.5, 0.0, False),
+    #            (3.5, 1.9, np.pi * 0.75, 0.0, False)]
+    # group = ([0, 1, 2], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # plot_2d(x_mesh, y_mesh, f_dsz, persons=persons, title='DSZ (Fig. 4, b)')
+    #
+    #
+    # # Fig 4 (c)
+    # persons = [(2.5, 3.0, np.pi * 0.5, 0.8, False),
+    #            (3.0, 2.5, np.pi * 0.5, 0.8, False),
+    #            (3.5, 3.0, np.pi * 0.5, 0.8, False)]
+    # group = ([0, 1, 2], np.pi * 0.5, 0.8)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # plot_2d(x_mesh, y_mesh, f_dsz, persons=persons, title='DSZ (Fig. 4, c)')
+    #
+    #
+    # # Fig 4 (d)
+    # persons = [(2.5, 2.5, np.pi * 0.25, 0.0, False),
+    #            (3.5, 2.5, np.pi * 0.75, 0.0, True)]
+    # group = ([0, 1], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # plot_2d(x_mesh, y_mesh, f_dsz, persons=persons, title='DSZ (Fig. 4, d)')
+
+
+    #################################
+    # Figure 5 - Approach areas
+
+    # x_coordinates = np.linspace(0, 6, 700)
+    # y_coordinates = np.linspace(0, 6, 700)
+    # x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    #
+    # # Fig 5 d top - right
+    # robot = (5.0, 5.0)
+    # persons = [(2.5, 1.8, np.pi * 0.45, 0.0, False),
+    #            (3.5, 1.8, np.pi * 0.55, 0.0, False)]
+    # object = (3.0, 5.0)
+    # group = ([0, 1], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group, objects=object)
+    # goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+    # plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, objects=object, approaching_areas=A_out, goal=goal,
+    #         title='Approach area (Fig. 5, d, top right)')
+    #
+    # # Fig 5 d bottom right
+    # robot = (5.0, 5.0)
+    # persons = [(2.5, 1.9, np.pi * 0.25, 0.0, False),
+    #            (3.0, 3.8, np.pi * -0.5, 0.0, False),
+    #            (3.5, 1.9, np.pi * 0.75, 0.0, False)]
+    # group = ([0, 1, 2], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+    # plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, approaching_areas=A_out, goal=goal,
+    #         title='Approach area (Fig. 5, d, bottom right)')
+    #
+    # # Fig 5 d top left
+    # robot = (5.0, 5.0)
+    # persons = [(2.5, 3.0, np.pi * 0.5, 0.8, False),
+    #            (3.0, 2.5, np.pi * 0.5, 0.8, False),
+    #            (3.5, 3.0, np.pi * 0.5, 0.8, False)]
+    # group = ([0, 1, 2], np.pi * 0.5, 0.8)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+    # plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, approaching_areas=A_out, goal=goal,
+    #         title='Approach area (Fig. 5, d, top left)')
+    #
+    # # Fig 5 d bottom left
+    # robot = (5.0, 5.0)
+    # persons = [(2.5, 2.5, np.pi * 0.25, 0.0, False),
+    #            (3.5, 2.5, np.pi * 0.75, 0.0, True)]
+    # group = ([0, 1], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+    # plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, approaching_areas=A_out, goal=goal,
+    #         title='Approach area (Fig. 5, d, bottom left)')
+
+
+
+    # ##########################################
+    # # PATH PLANNER
+    #
+    # x_coordinates = np.linspace(0, 6, 700)
+    # y_coordinates = np.linspace(0, 6, 700)
+    #
+    # x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+    #
+    # # Fig 5 d bottom left
+    # robot = (3.0, 1.0, 0.0)
+    # persons = [(2.5, 2.5, np.pi * 0.25, 0.0, False),
+    #            (3.5, 2.5, np.pi * 0.75, 0.0, True)]
+    # group = ([0, 1], 0.0, 0.0)
+    # f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+    # goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+    # # define velocity map
+    # velocity_map = 1.0 - f_dsz
+    # path = calc_path(x_mesh,y_mesh, velocity_map, robot, goal)
+    # plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, approaching_areas=A_out, goal=goal, path=path,
+    #         title='Robot path')
+
+
+    ##########################################
+    # OBSTACLES
+
+    x_coordinates = np.linspace(0, 6, 600)
+    y_coordinates = np.linspace(0, 6, 600)
+    x_mesh, y_mesh = np.meshgrid(x_coordinates, y_coordinates)
+
+    # default occupancy map
+    occupancy_map = get_default_occupancy_map(x_mesh, y_mesh)
+    occupancy_map = add_box_to_occupancy_map(x_mesh, y_mesh, occupancy_map, (1.0, 1, 300, 0.2))
+    velocity_map = occupancy_map_to_velocity_map(x_mesh, y_mesh, occupancy_map)
+
+    robot = (3.0, 0.5, 0.0)
+    persons = [(2.5, 3.5, np.pi * 0.25, 0.0, False),
+               (3.5, 3.5, np.pi * 0.75, 0.0, True),
+               (1.0, 3.0, np.pi * -0.5, 0.0, False)]
+    group = ([0, 1], 0.0, 0.0)
+
+    f_dsz = calc_dsz(x_mesh, y_mesh, persons, groups=group)
+
+    goal, A_out = calc_goal_pos(x_mesh, y_mesh, f_dsz, robot, [persons[p_idx] for p_idx in group[0]])
+
+    # define velocity map
+    velocity_map = np.minimum(velocity_map, 1.0 - f_dsz)
+
+
+    path = calc_path(x_mesh, y_mesh, velocity_map, robot, goal)
+
+    plot_2d(x_mesh, y_mesh, f_dsz, robot=robot, persons=persons, approaching_areas=A_out, goal=goal, path=path,
+            occupancy_map=occupancy_map, title='Robot path')
+
+
+    # TODO:
+    #   allow different groups
+    #   define which person or group to join
+
+
+    # TODO: group identification
+
+    # TODO: Simulation
+
+    # TODO: incorporate movement
+
+    # TODO: compute the travel time to different possible goals and select the one with the optimal travel time, not just the one that is closest to the robot
diff --git a/requirements.txt b/requirements.txt
index c9dc155b5a098ade921a826f32d6b8338e312b39..550d4c7f2692ab606969cc05c6a220856c523433 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ attrs==21.4.0
 backcall==0.2.0
 beautifulsoup4==4.11.1
 bleach==5.0.1
-Box2D==2.3.10
+Box2D==2.3.10 -f https://github.com/pybox2d/pybox2d.git
 cachetools==5.2.0
 certifi==2022.5.18.1
 cffi==1.15.1
@@ -43,7 +43,7 @@ ipython-genutils==0.2.0
 ipywidgets==7.6.5
 isort==5.10.1
 jax==0.2.20
-jaxlib==0.1.71
+jaxlib==0.1.71 -f https://storage.googleapis.com/jax-releases/jax_releases.html
 jedi==0.18.1
 Jinja2==3.1.2
 joblib==1.1.0
diff --git a/setup.cfg b/setup.cfg
index e04c913cb334971f7c1f703965502e3887c23038..214125efafb54e1eaa020cf44549253f13c27ee1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -11,7 +11,7 @@ install_requires =
     six >= 1.16.0
     scipy >= 1.8.1
     opencv-python >= 4.6.0.66
-python_requires =
-    >=3.8, <=3.9.16
+#python_requires =
+#    >=3.8, <=3.9.16
 
 # required packages are in the setup.py
\ No newline at end of file