From 4281c4094f41757c2fb204f49c55675eb53f9d62 Mon Sep 17 00:00:00 2001
From: Alex AUTERNAUD <alex.auternaud@inria.fr>
Date: Tue, 13 Dec 2022 13:31:52 +0100
Subject: [PATCH] Extract _recreate_state helper in ARINavigation and rename goal-setting methods (set_human_go_towards -> set_go_towards_human, set_goal_position -> set_go_towards_position)

---
 examples/scene_01.py                 |  4 +-
 examples/welcome_scenario_02.py      |  4 +-
 mpi_sim/components/ari_navigation.py | 58 +++++++++++++++-------------
 test/objects/test_ari_robot.py       | 19 +++++----
 4 files changed, 44 insertions(+), 41 deletions(-)

diff --git a/examples/scene_01.py b/examples/scene_01.py
index 5535db0..9ce07d4 100755
--- a/examples/scene_01.py
+++ b/examples/scene_01.py
@@ -61,9 +61,9 @@ def main():
     simulation.add_object(human_11)
 
     ari_robot = simulation.objects[50]
-    # ari_robot.navigation.set_human_go_towards(30)
+    # ari_robot.navigation.set_go_towards_human(30)
     # ari_robot.navigation.set_human_look_at(30)
-    ari_robot.navigation.set_goal_position([-4., 3.], 0.)
+    ari_robot.navigation.set_go_towards_position([-4., 3.], 0.)
 
     group_1 = sim.scripts.GroupNavigation()
     simulation.add_script(group_1, id=1001)
diff --git a/examples/welcome_scenario_02.py b/examples/welcome_scenario_02.py
index 2bc764e..4c0c9f5 100755
--- a/examples/welcome_scenario_02.py
+++ b/examples/welcome_scenario_02.py
@@ -25,7 +25,7 @@ class GroupDiscussionScript(mpi_sim.Script):
 
         # define a goal for the group and let ari join the group by giving on person as target
         self.group.set_group_center([2.0, 3.0])
-        self.ari.navigation.set_human_go_towards(self.paul)
+        self.ari.navigation.set_go_towards_human(self.paul)
 
         self.status = 'grouping'
 
@@ -55,7 +55,7 @@ class GroupDiscussionScript(mpi_sim.Script):
         # finally, let ARI leave
         elif self.status == 'thank_ari' and self.irene.speech.previous_speech_act == 'GOODBYE':
             self.ari.speech.say('Good Bye', act='GOODBYE')
-            self.ari.navigation.set_goal_position([4., 4.], -3 / 4 * np.pi)
+            self.ari.navigation.set_go_towards_position([4., 4.], -3 / 4 * np.pi)
             self.status = 'leave'
 
         # stop the simulation after ari left
diff --git a/mpi_sim/components/ari_navigation.py b/mpi_sim/components/ari_navigation.py
index 9d4992e..aa0615c 100644
--- a/mpi_sim/components/ari_navigation.py
+++ b/mpi_sim/components/ari_navigation.py
@@ -127,7 +127,7 @@ class ARINavigation(GeneratorComponent):
         self.mpc_controller.controller_config.human_target_id = -1
         self.mpc_controller.controller_config.pan_target_id = -1
         self.mpc_controller.controller_config.goto_target_pose = np.zeros_like(self.mpc_controller.controller_config.goto_target_pose)
-        self._reset_robot_state()
+        self._recreate_state()
         self.mpc_controller.step(self.state.robot_state)
 
         self.action_idx = 0
@@ -253,31 +253,7 @@ class ARINavigation(GeneratorComponent):
         #####################################
         # recreate the robot state
 
-        self._reset_robot_state()
-        angle = constraint_angle(self.object.orientation)
-        self.state.robot_state.robot_pose = np.array([*self.object.position, angle])
-        v = np.array([-np.sin(angle), np.cos(angle)])
-        self.state.robot_state.robot_velocity[0] = np.dot(np.asarray(self.object.position_velocity), v)
-        self.state.robot_state.robot_velocity[1] = (self.object.orientation_velocity)
-        self.state.robot_state.joint_angles[:] = constraint_angle(-self.object.joints[-1].angle)
-        self.state.robot_state.joint_velocities[:] = -self.object.joints[-1].speed
-        self.state.robot_state.global_map = self.ari_mapping.global_map
-        self.state.robot_state.local_map = self.ari_mapping.local_map
-
-        humans = self.simulation.get_objects_by_type(mpi_sim.objects.Human)
-
-        for human_idx, human in enumerate(humans):
-            if human_idx >= self.max_human_nb:
-                warnings.warn('Number of humans in simulation is larger than limit for ARI\'s SocialMPC. Set max_human_nb of the ARI Navigation component higher.')
-                break
-            self.state.robot_state.humans_id[human_idx] = human.id
-            angle = constraint_angle(human.orientation)
-            self.state.robot_state.humans_pose[human_idx, :] = [*human.position, angle]
-            v = np.array([-np.sin(angle), np.cos(angle)])
-            self.state.robot_state.humans_velocity[human_idx, :] = [np.dot(human.position_velocity, v), human.orientation_velocity]
-            self.state.robot_state.humans_visible[human_idx] = 0.
-            if self.object.raycast_obs:
-                self.state.robot_state.humans_visible[human_idx] = self.object.raycast_obs[human.id]
+        self._recreate_state()
 
         #####################################
         # use the mpc controller for the navigation       
@@ -320,7 +296,35 @@ class ARINavigation(GeneratorComponent):
 
             self.object.pan_vel = -action[0]
             self.object.forward_velocity = action[2]
-            self.object.orientation_velocity = action[1]   
+            self.object.orientation_velocity = action[1]
+
+    
+    def _recreate_state(self):
+        self._reset_robot_state()
+        angle = constraint_angle(self.object.orientation)
+        self.state.robot_state.robot_pose = np.array([*self.object.position, angle])
+        v = np.array([-np.sin(angle), np.cos(angle)])
+        self.state.robot_state.robot_velocity[0] = np.dot(np.asarray(self.object.position_velocity), v)
+        self.state.robot_state.robot_velocity[1] = (self.object.orientation_velocity)
+        self.state.robot_state.joint_angles[:] = constraint_angle(-self.object.joints[-1].angle)
+        self.state.robot_state.joint_velocities[:] = -self.object.joints[-1].speed
+        self.state.robot_state.global_map = np.flip(self.ari_mapping.global_map.T, axis=0)
+        self.state.robot_state.local_map = np.flip(self.ari_mapping.local_map.T, axis=0)
+
+        humans = self.simulation.get_objects_by_type(mpi_sim.objects.Human)
+
+        for human_idx, human in enumerate(humans):
+            if human_idx >= self.max_human_nb:
+                warnings.warn('Number of humans in simulation is larger than limit for ARI\'s SocialMPC. Set max_human_nb of the ARI Navigation component higher.')
+                break
+            self.state.robot_state.humans_id[human_idx] = human.id
+            angle = constraint_angle(human.orientation)
+            self.state.robot_state.humans_pose[human_idx, :] = [*human.position, angle]
+            v = np.array([-np.sin(angle), np.cos(angle)])
+            self.state.robot_state.humans_velocity[human_idx, :] = [np.dot(human.position_velocity, v), human.orientation_velocity]
+            self.state.robot_state.humans_visible[human_idx] = 0.
+            if self.object.raycast_obs:
+                self.state.robot_state.humans_visible[human_idx] = self.object.raycast_obs[human.id]
 
 
     def _close(self):
diff --git a/test/objects/test_ari_robot.py b/test/objects/test_ari_robot.py
index 467846c..0ad2ead 100644
--- a/test/objects/test_ari_robot.py
+++ b/test/objects/test_ari_robot.py
@@ -23,7 +23,7 @@ def test_ari_navigation_go_to_human_simple():
         orientation=np.pi,
     )
     simulation.add_object(ari)
-    ari.navigation.set_human_go_towards(human)
+    ari.navigation.set_go_towards_human(human)
 
     is_human_reached = False
     for step in range(700):
@@ -46,7 +46,8 @@ def test_ari_navigation_go_to_human_with_obstacle():
             {'type': 'Wall', 'position': [6., 3.], 'orientation': 0., 'height': 6.},  # east
             {'type': 'Wall', 'position': [0., 3.], 'orientation': 0., 'height': 6.},  # west
             {'type': 'Wall', 'position': [3., 6.0], 'orientation': np.pi / 2, 'height': 6.2},  # south
-            {'type': 'Wall', 'position': [3., 0.0], 'orientation': np.pi / 2, 'height': 6.2},  # north
+            {'type': 'Wall', 'position': [3., 0.0], 'orientation': np.pi / 2, 'height': 6.2},  # north,
+            # {'type': 'Bench', 'position': [2., 4.7]},
             {'type': 'Bench', 'position': [2., 3.7]}
         ],
         processes=[{'type': 'GUI'}]
@@ -56,14 +57,14 @@ def test_ari_navigation_go_to_human_with_obstacle():
     simulation.add_object(human)
 
     ari = mpi_sim.objects.ARIRobot(
-        position=[4., 4.],
-        orientation=- 3 / 4 * np.pi,
+        position=[3., 5.],
+        orientation=0., #- 3 / 4 * np.pi,
     )
     simulation.add_object(ari)
-    ari.navigation.set_human_go_towards(human)
+    ari.navigation.set_go_towards_human(human)
 
     is_human_reached = False
-    for step in range(700):
+    for step in range(5000):
         simulation.step()
 
         if mpi_sim.utils.measure_center_distance(ari, human) < 1.5:
@@ -105,7 +106,7 @@ def test_ari_navigation_go_to_group():
 
     # define a goal for the group and let ari join the group by giving on person as target
     group.set_group_center([2.0, 3.0])
-    ari.navigation.set_human_go_towards(paul)
+    ari.navigation.set_go_towards_human(paul)
 
     # run the simulation, until the script stops the simulation
     simulation.run(50)
@@ -135,14 +136,12 @@ def test_ari_navigation_set_goal_position():
         orientation=- 3 / 4 * np.pi,
     )
     simulation.add_object(ari)
-    ari.navigation.set_goal_position([2., 2.], 0.)
+    ari.navigation.set_go_towards_position([2., 2.], 0.)
 
     is_goal_reached = False
     for step in range(2000):
         simulation.step()
 
-        # TODO: test also the goal orientation
-
         if mpi_sim.utils.measure_center_distance(ari, [2., 2.]) < 0.2 and np.abs(mpi_sim.utils.angle_difference(ari.orientation, ari.navigation.state.goal_orientation)) < 0.2:
             is_goal_reached = True
             break
-- 
GitLab