diff --git a/services/camera/src/camera.py b/services/camera/src/camera.py
index 16c3f9c8d1e8884c3b10da994413cbafc34cc9de..1eb82756121f2b06cf7f1c7cad69f0aad37147dd 100644
--- a/services/camera/src/camera.py
+++ b/services/camera/src/camera.py
@@ -43,6 +43,11 @@ sleep_time_between_frame = meter.create_gauge(
     description="sleep_time_between_frame",
     unit="ms"
 )
+send_time = meter.create_gauge(
+    name="send_time",
+    description="Time taken to send one frame to the motion detector",
+    unit="ms"
+)
 
 frame_rate:int = 10
 MIN_FRAME_TIME:float = 1.0 / frame_rate
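The send_time gauge added above follows the same record-a-timing pattern the service already uses for frame time and sleep time. A minimal, self-contained sketch of that pattern, assuming an opentelemetry-sdk version that exposes the synchronous Gauge (the meter name and the timed sleep stand-in are placeholders, not taken from this repo):

    from time import perf_counter, sleep
    from opentelemetry import metrics

    meter = metrics.get_meter("camera")  # placeholder meter name
    send_time = meter.create_gauge(
        name="send_time",
        description="Time taken to send one frame to the motion detector",
        unit="ms",
    )

    start = perf_counter()
    sleep(0.001)  # stand-in for the real work, e.g. socket.send(...)
    send_time.set((perf_counter() - start) * 1000)  # perf_counter measures seconds; the gauge is declared in ms
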
@@ -57,9 +62,9 @@ port = int(os.getenv("MDPORT", 9998))
 
 # Map animal to the appropriate video filenames
 animal_map = {
-    'bear': ('../footage/bear/no_bear.mp4', '../footage/bear/with_bear.mp4'),
-    'tiger': ('../footage/tiger/no_tiger.mp4', '../footage/tiger/with_tiger.mp4'),
-    'wolf': ('../footage/wolf/no_wolf.mp4', '../footage/wolf/with_wolf.mp4')
+    'bear': ('footage/bear/no_bear.mp4', 'footage/bear/with_bear.mp4'),
+    'tiger': ('footage/tiger/no_tiger.mp4', 'footage/tiger/with_tiger.mp4'),
+    'wolf': ('footage/wolf/no_wolf.mp4', 'footage/wolf/with_wolf.mp4')
 }
 
 if animal_name not in animal_map:
@@ -114,15 +119,13 @@ def stream_video_clip(socket):
                 for no_motion_clip_length in generate_random_intervals(appearance_rate):
                     # Alternate between video with motion and video without motion
                     for motion in (False, True):
-
-
                         frame_number = 0
 
                         # Open the appropriate video file
                         video_path = with_animal_video if motion else no_animal_video
                         with videocapture(video_path) as video_capture:
 
-                            if(video_capture is None):
+                            if video_capture is None:
                                 logging.error("Could not open video capture.")
                                 exit(1)
 
@@ -157,8 +160,11 @@ def stream_video_clip(socket):
                                 frame = imutils.resize(frame, width=640)
 
                                 packet= CameraToMotionDetectorPacket(frame, str(camera_index), carrier)
+                                send_time_start = perf_counter()
                                 socket.send(packet.pickle())
-                                logging.info(f"Frame sent: {frame_number}, {motion}, {frame_beginning_time}")
+                                elapsed_time = perf_counter() - send_time_start
+                                send_time.set(elapsed_time * 1000)  # perf_counter measures seconds; the gauge unit is ms
+                                logging.info(f"Frame number: {frame_number}, has motion: {motion}, network send time: {elapsed_time}")
 
                                 frame_number += 1
 
@@ -167,9 +173,8 @@ def stream_video_clip(socket):
                                 logging.info(f"Frame time: {frametime}")
                                 frametime_histogram.set(frametime)
 
-
                                 sleep_time = MIN_FRAME_TIME - frametime
-                                logging.info(f"fps: {1/frametime}, sleep time: {sleep_time}, fps at sleep time: {1/sleep_time}")
+                                logging.info(f"frame time: {frametime}, sleep time: {sleep_time}")
                                 sleep_time_between_frame.set(sleep_time)
                                 sleep(max(0.0, sleep_time))
             except Exception as error:
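For reference, the sleep computation in this hunk implements a fixed-rate pacing loop: each iteration gets a budget of MIN_FRAME_TIME seconds, and the loop sleeps away whatever portion of the budget the frame work did not use. A minimal sketch of the idea, with the per-frame work replaced by a placeholder:

    from time import perf_counter, sleep

    frame_rate = 10
    MIN_FRAME_TIME = 1.0 / frame_rate  # per-frame budget in seconds

    for _ in range(50):
        frame_start = perf_counter()
        # ... grab, resize and send one frame here (placeholder) ...
        frametime = perf_counter() - frame_start
        # Sleep only for the unused part of the budget, never a negative duration.
        sleep(max(0.0, MIN_FRAME_TIME - frametime))
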
@@ -178,7 +183,5 @@ def stream_video_clip(socket):
                 return
 
 
-
-
 if __name__ == "__main__":
     main()
diff --git a/services/motion_detector/src/motion_detection.py b/services/motion_detector/src/motion_detection.py
index 6a4189accf54f00a9adf8e04e8542784492c5e4b..b5a3fcbed4c55540053fa570b97dc961bdbecc03 100644
--- a/services/motion_detector/src/motion_detection.py
+++ b/services/motion_detector/src/motion_detection.py
@@ -3,12 +3,14 @@ import datetime
 import logging  # Import the logging module
 import os
 import pickle
+import socket
 
 import time
 from time import perf_counter
 
 import cv2
 import imutils
+import numpy as np
 import psutil
 import zmq
 from opentelemetry.propagate import extract, inject
@@ -63,88 +65,96 @@ def get_cpu_usage():
     logging.debug(f"CPU usage retrieved: {usage}%")
     return usage
 
+# detect_motion() is now called once per frame, so the reference frame and
+# the FPS / detection counters must persist at module level between calls.
+previous_frame = None
+fps_frame_count = 0
+md_detected_motion_nbr_rec = 0
+fps_start_time = time.time()
 def detect_motion(frame, source_camera_identifier, trace_carrier):
-    first_frame = None
-    fps_frame_count = 0
-    md_detected_motion_nbr_rec=0
-    fps_start_time = time.time()
-
-    while True:
-        extracted_context = extract(trace_carrier)
-        trace_carrier = {}
-        with tracer.start_as_current_span("processing the frame",context=extracted_context) as span:
-            try:
-                inject(trace_carrier)
-                logging.debug(f"FRAME RECEIVE FROM CLIENT: {source_camera_identifier} | TIME: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
-
-                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-                gray = cv2.GaussianBlur(gray, (21, 21), 0)
-                logging.debug("Frame converted to grayscale and blurred.")
-
-                if first_frame is None:
-                    first_frame = gray
-                    logging.debug("Initialized reference frame for motion detection.")
+    extracted_context = extract(trace_carrier)
+    trace_carrier = {}
+    global previous_frame, fps_frame_count, md_detected_motion_nbr_rec, fps_start_time
+
+    with tracer.start_as_current_span("processing the frame", context=extracted_context) as span:
+        try:
+            inject(trace_carrier)
+            logging.debug(f"FRAME RECEIVED FROM CLIENT: {source_camera_identifier} | TIME: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+
+            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            gray = cv2.GaussianBlur(gray, (21, 21), 0)
+            logging.debug("Frame converted to grayscale and blurred.")
+
+            if previous_frame is None:
+                previous_frame = gray
+                logging.debug("Initialized reference frame for motion detection.")
+
+            starting_processing_time = time.time()
+            fps_frame_count += 1
+            elapsed_time = time.time() - fps_start_time
+            if elapsed_time >= 10.0:
+                fps = round(fps_frame_count / elapsed_time, 2)
+                fps_count.set(fps)
+                logging.info(f"FPS: {fps}")
+                fps_frame_count = 0
+                fps_start_time = time.time()
+
+            frame_delta = cv2.absdiff(previous_frame, gray)
+
+            thresh = cv2.threshold(frame_delta, 1, 255, cv2.THRESH_BINARY)[1]
+            previous_frame = gray
+
+            thresh = cv2.dilate(thresh, np.ones((5, 5), np.uint8), iterations=2)
+            contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
+                                    cv2.CHAIN_APPROX_SIMPLE)
+            contours = imutils.grab_contours(contours)
+
+            #cv2.imshow("thres", thresh)
+            #cv2.waitKey(1)
+            if len(contours) == 0:
+                logging.debug("No contours detected.")
+
+            detected_cnt = 0
+            detected = False
+            bounding_rectangle = ()
+
+            for contour in contours:
+                if cv2.contourArea(contour) < 10000:
                     continue
-
-                starting_processing_time = time.time()
-                fps_frame_count += 1
-                elapsed_time = time.time() - fps_start_time
-                if elapsed_time >= 10.0:
-                    fps = round(fps_frame_count / elapsed_time, 2)
-                    fps_count.set(fps)
-                    logging.info(f"FPS: {fps}")
-                    fps_frame_count = 0
-                    fps_start_time = time.time()
-
-                frame_delta = cv2.absdiff(first_frame, gray)
-                thresh = cv2.threshold(frame_delta, 25, 255, cv2.THRESH_BINARY)[1]
-                thresh = cv2.dilate(thresh, None, iterations=2)
-                contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
-                                        cv2.CHAIN_APPROX_SIMPLE)
-                contours = imutils.grab_contours(contours)
-
-                if len(contours) == 0:
-                    logging.debug("No contours detected.")
-
-                detected_cnt = 0
-                detected = False
-                bounding_rectangle = ()
-                for contour in contours:
-                    if cv2.contourArea(contour) < 10000:
-                        continue
-                    detected = True
-                    detected_cnt += 1
-                    logging.info(f"Motion detected. Contour area: {cv2.contourArea(contour)}.")
-
-                    (x, y, w, h) = cv2.boundingRect(contour)
-                    bounding_rectangle = (x, y, w, h)
-                    #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-                    "md: motion detected"
-                if detected:
-                    logging.info("Motion detected, updating metric.")
-                    md_detected_motion_nbr_rec += 1
-                    md_detected_motion_nbr.set(md_detected_motion_nbr_rec)
-                    md_detected_motion.set(1)
-                else:
-                    md_detected_motion.set(0)
-
-                return detected, bounding_rectangle
-            except Exception as e:
-                logging.error(f"Error processing frame: {e}")
-                break
+                detected = True
+                detected_cnt += 1
+                logging.info(f"Motion detected. Contour area: {cv2.contourArea(contour)}.")
+
+                (x, y, w, h) = cv2.boundingRect(contour)
+                bounding_rectangle = (x, y, w, h)
+                #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+                #"md: motion detected"
+                #cv2.imshow("delta", frame)
+                #cv2.waitKey(1)
+            if detected:
+                logging.info("Motion detected, updating metric.")
+                md_detected_motion_nbr_rec += 1
+                md_detected_motion_nbr.set(md_detected_motion_nbr_rec)
+                md_detected_motion.set(1)
+            else:
+                md_detected_motion.set(0)
+
+            return detected, bounding_rectangle
+        except Exception as e:
+            logging.error(f"Error processing frame: {e}")
+            return False, ()
 
 def main():
     # Retrieve environment variables instead of command-line arguments
     or_host_ip = os.getenv('OR_HOST', 'localhost')  # Default to 'localhost'
     or_port = int(os.getenv('OR_PORT', 9999))  # Default to 9999
-    #host_name = socket.gethostname()
-    #host_ip = socket.gethostbyname(host_name)
-    #logging.info(f'HOST IP: {host_ip}')
+    host_name = socket.gethostname()
+    host_ip = socket.gethostbyname(host_name)
+    logging.info(f'HOST IP: {host_ip}')
     host_port = 9998
 
     context = zmq.Context()
     camera_socket = context.socket(zmq.PAIR)
-    camera_socket.bind(f"tcp://localhost:{host_port}")
+    camera_socket.bind(f"tcp://{host_ip}:{host_port}")
     logging.info(f'Camera socket bound to {host_port}')
 
     or_socket = context.socket(zmq.PAIR)
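The detection logic reworked in this hunk is classic frame differencing. To make it easier to review in isolation, here is the same pipeline reduced to a self-contained function; the blur kernel, the threshold of 1, the 5x5 dilation kernel and the 10000-pixel area cutoff mirror the diff, while the function shape itself is just a sketch:

    import cv2
    import imutils
    import numpy as np

    def moving_regions(previous_gray, frame, min_area=10000):
        # Returns (new reference frame, bounding boxes of regions larger than min_area).
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if previous_gray is None:
            return gray, []  # first frame: nothing to diff against yet
        delta = cv2.absdiff(previous_gray, gray)  # per-pixel change since the last frame
        thresh = cv2.threshold(delta, 1, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, np.ones((5, 5), np.uint8), iterations=2)
        contours = imutils.grab_contours(
            cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
        boxes = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) >= min_area]
        return gray, boxes

Threading the previous grayscale frame through the return value would also remove the need for the module-level previous_frame global; the diff keeps the global because detect_motion() is invoked once per frame from main().
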
@@ -164,8 +172,6 @@ def main():
         starting_processing_time = perf_counter()
 
         frame = camera_packet.decode_frame()
-        cv2.imshow("Frame", frame)
-        cv2.waitKey(1)
 
         if frame is None:
             logging.error("Invalid frame received.")
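The extract/inject pair used around the span above is standard OpenTelemetry context propagation: the sender injects the active trace context into a plain dict that travels with the packet, and the receiver extracts it so its span joins the same trace. A minimal sketch of both halves (tracer and exporter wiring omitted; the tracer name is a placeholder):

    from opentelemetry import trace
    from opentelemetry.propagate import extract, inject

    tracer = trace.get_tracer("motion_detector")  # placeholder tracer name

    # Sender side: capture the current trace context into a carrier dict.
    carrier = {}
    inject(carrier)  # carrier now holds e.g. a 'traceparent' entry
    # ... ship carrier alongside the frame ...

    # Receiver side: continue the same trace in a new span.
    ctx = extract(carrier)
    with tracer.start_as_current_span("processing the frame", context=ctx):
        pass  # frame processing goes here
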
diff --git a/services/object_recognizer/src/object_recognizer.py b/services/object_recognizer/src/object_recognizer.py
index 15c08d6780d148a4ddba4be6f58b482c5229be7f..34dc0589ddee14e40091b6ed9c5b8136677afe91 100644
--- a/services/object_recognizer/src/object_recognizer.py
+++ b/services/object_recognizer/src/object_recognizer.py
@@ -87,6 +87,7 @@ def handle_client(frame_queue: queue.Queue[MotionDetectorToObjectRecognizerPacke
 def main():
     context = zmq.Context()
     server_socket = context.socket(zmq.PAIR)
+    server_socket.setsockopt(zmq.RCVHWM, 1)  # We already keep our own queue on this side, so cap ZeroMQ's receive queue at a single message
     host_name = socket.gethostname()
     host_ip = socket.gethostbyname(host_name)
     server_address = f"tcp://{host_ip}:{9999}"
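On the RCVHWM setting: ZeroMQ queues up to high-water-mark messages per connection, and a PAIR peer that hits the mark blocks rather than drops. Capping the receive side at one message pushes backpressure to the sender instead of letting stale frames pile up inside ZeroMQ (the effective depth also depends on the peer's SNDHWM), and the option must be set before bind/connect to take effect, as the diff does. A minimal sketch of the intent, with a placeholder address:

    import zmq

    context = zmq.Context()
    server_socket = context.socket(zmq.PAIR)
    # Set before bind(): ZeroMQ buffers at most one unread message,
    # so the application-level queue stays the only real buffer.
    server_socket.setsockopt(zmq.RCVHWM, 1)
    server_socket.bind("tcp://*:9999")  # placeholder address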