diff --git a/deploy.sh b/deploy.sh
index 9b2747188a457504782eb8986f99852712d537a8..7642e270c2ab7fa77368603682b6164a05b7e489 100644
--- a/deploy.sh
+++ b/deploy.sh
@@ -4,13 +4,14 @@
 DOCKER_USERNAME="medkaddour"
 SERVICES=("camera" "motion_detector" "object_recognizer")
 TAG="latest"
-DEPLOY_ENV=${1:-cloud} # Default to local if no argument is provided
+DEPLOY_ENV=${1:-local} # Default to 'local' if no argument is provided
 
 # Function to build and push a Docker image
 build_and_push() {
     local service=$1
-    echo "Building Docker image for ${service}..."
-    docker build -t ${DOCKER_USERNAME}/${service}:${TAG} ./${service}
+    local service_path="./services/${service}" # Service directory path
+    echo "Building Docker image for ${service} from ${service_path}..."
+    docker build -t ${DOCKER_USERNAME}/${service}:${TAG} ${service_path}
 
     if [ "$DEPLOY_ENV" == "cloud" ]; then
         echo "Pushing Docker image for ${service} to Docker Hub..."
@@ -24,13 +25,19 @@ for service in "${SERVICES[@]}"; do
 done
 
 # Step 2: Deploy the images
+DOCKER_COMPOSE_PATH="./deploy/docker-compose/docker-compose.yml"
 if [ "$DEPLOY_ENV" == "local" ]; then
+    echo "Stopping and removing existing containers..."
+    docker-compose -f ${DOCKER_COMPOSE_PATH} down --volumes --remove-orphans
     echo "Running Docker images locally with Docker Compose..."
-    docker-compose up -d --build
+    docker-compose -f ${DOCKER_COMPOSE_PATH} up -d --build --force-recreate
 elif [ "$DEPLOY_ENV" == "cloud" ]; then
+    echo "Stopping and removing existing containers..."
+    docker-compose -f ${DOCKER_COMPOSE_PATH} down --volumes --remove-orphans
     echo "Pulling and running Docker images from Docker Hub with Docker Compose..."
-    #docker-compose pull
-    # docker-compose up -d --build
+    # Force repull the latest images
+    docker-compose -f ${DOCKER_COMPOSE_PATH} pull --ignore-pull-failures
+    docker-compose -f ${DOCKER_COMPOSE_PATH} up -d --build --force-recreate
 else
     echo "Invalid DEPLOY_ENV value. Please use 'local' or 'cloud'."
     exit 1
diff --git a/docker-compose.yml b/deploy/docker-compose/docker-compose.yml
similarity index 71%
rename from docker-compose.yml
rename to deploy/docker-compose/docker-compose.yml
index 465bddc224e95829d06e3a2ac2739ecdde632a87..a3f0947b27403a7a48daedaff9839f0bcd97f7b1 100644
--- a/docker-compose.yml
+++ b/deploy/docker-compose/docker-compose.yml
@@ -5,6 +5,7 @@ services:
     image: medkaddour/object_recognizer
     ports:
       - "9999:9999"
+      - "5000:5000"
 
   motion_detector_1:
     image: medkaddour/motion_detector
@@ -13,7 +14,11 @@
     depends_on:
       - object_recognizer
       - jaeger
-    command: /bin/sh -c "echo 1 > /app/index.txt && python motion_detection.py --host object_recognizer --port 9999"
+    environment:
+      - INDEX=1
+      - OR_HOST=object_recognizer
+      - OR_PORT=9999
+    command: python src/motion_detection.py
 
   motion_detector_2:
     image: medkaddour/motion_detector
@@ -22,7 +27,11 @@
     depends_on:
       - object_recognizer
       - jaeger
-    command: /bin/sh -c "echo 2 > /app/index.txt && python motion_detection.py --host object_recognizer --port 9999"
+    environment:
+      - INDEX=2
+      - OR_HOST=object_recognizer
+      - OR_PORT=9999
+    command: python src/motion_detection.py
 
   motion_detector_3:
     image: medkaddour/motion_detector
@@ -31,26 +40,54 @@
     depends_on:
       - object_recognizer
       - jaeger
-    command: /bin/sh -c "echo 3 > /app/index.txt && python motion_detection.py --host object_recognizer --port 9999"
+    environment:
+      - INDEX=3
+      - OR_HOST=object_recognizer
+      - OR_PORT=9999
+    command: python src/motion_detection.py
 
   camera_1:
     image: medkaddour/camera
     depends_on:
       - motion_detector_1
       - otel-collector
-    command: /bin/sh -c "echo 1 > /app/index.txt && python camera.py --mdhost motion_detector_1 --mdport 9998"
+    environment:
+      - CAMERA=true
+      - ANIMAL_NAME=tiger
+      - APPEARANCE_RATE=600
+      - MDHOST=motion_detector_1
+      - MDPORT=9998
+      - INDEX=1
+    command: /bin/sh -c "python src/camera.py"
+
   camera_2:
     image: medkaddour/camera
     depends_on:
       - motion_detector_2
       - otel-collector
-    command: /bin/sh -c "echo 2 > /app/index.txt && python camera.py --mdhost motion_detector_2 --mdport 9998"
+    environment:
+      - CAMERA=true
+      - ANIMAL_NAME=bear
+      - APPEARANCE_RATE=500
+      - MDHOST=motion_detector_2
+      - MDPORT=9998
+      - INDEX=2
+    command: /bin/sh -c "python src/camera.py"
+
   camera_3:
     image: medkaddour/camera
     depends_on:
       - motion_detector_3
      - otel-collector
-    command: /bin/sh -c "echo 3 > /app/index.txt && python camera.py --mdhost motion_detector_3 --mdport 9998"
+    environment:
+      - CAMERA=true
+      - ANIMAL_NAME=wolf
+      - APPEARANCE_RATE=700
+      - MDHOST=motion_detector_3
+      - MDPORT=9998
+      - INDEX=3
+    command: /bin/sh -c "python src/camera.py"
+
   jaeger:
     image: jaegertracing/all-in-one
     ports:
@@ -59,8 +96,6 @@
       - "14268"
       - "14250"
-
-
 
   # Zipkin
   zipkin-all-in-one:
     image: openzipkin/zipkin:latest
@@ -87,6 +122,7 @@
     depends_on:
       - jaeger
      - zipkin-all-in-one
+
   cadvisor:
     image: gcr.io/cadvisor/cadvisor:latest
     hostname: cadvisor
@@ -99,6 +135,7 @@
       - "/dev/disk/:/dev/disk:ro"
     ports:
       - "8080:8080"
+
   prometheus:
     image: prom/prometheus:latest
     restart: always
@@ -107,6 +144,7 @@
       - ./rules.yml:/etc/prometheus/rules.yml
     ports:
       - "9090:9090"
+
   node-exporter:
     image: prom/node-exporter:latest
     container_name: node-exporter
@@ -114,11 +152,11 @@
     volumes:
       - /proc:/host/proc:ro
       - /sys:/host/sys:ro
       - /:/rootfs:ro
     command:
       - '--path.procfs=/host/proc'
       - '--path.rootfs=/rootfs'
       - '--path.sysfs=/host/sys'
       - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
     expose:
-      - 9100
\ No
newline at end of file + - 9100 diff --git a/otel-collector-config.yaml b/deploy/docker-compose/otel-collector-config.yaml similarity index 100% rename from otel-collector-config.yaml rename to deploy/docker-compose/otel-collector-config.yaml diff --git a/prometheus.yaml b/deploy/docker-compose/prometheus.yaml similarity index 100% rename from prometheus.yaml rename to deploy/docker-compose/prometheus.yaml diff --git a/kubernetees.ipynb b/kubernetees.ipynb deleted file mode 100644 index 9e3f12416c52df333defbc0f12f65862e62a21eb..0000000000000000000000000000000000000000 --- a/kubernetees.ipynb +++ /dev/null @@ -1,212 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "id": "5c61d749-73cb-4982-a0a5-bd0d09f4e1d8", - "metadata": { - "ExecuteTime": { - "end_time": "2024-07-10T10:33:38.448735Z", - "start_time": "2024-07-10T10:33:37.827646Z" - } - }, - "source": [ - "import enoslib as en\n", - "\n", - "# Enable rich logging\n", - "_ = en.init_logging()\n", - "\n", - "\n", - "# claim the resources\n", - "conf = (\n", - " en.VMonG5kConf\n", - " .from_settings(job_name=\"enoslib_providers\")\n", - " .add_machine(\n", - " roles=[\"master\"],\n", - " cluster=\"econome\",\n", - " number=1,\n", - " flavour=\"large\"\n", - " )\n", - " .add_machine(\n", - " roles=[\"agent\",\"cloud\"],\n", - " cluster=\"econome\",\n", - " number=1,\n", - " flavour=\"large\"\n", - " )\n", - " .add_machine(\n", - " roles=[\"agent\",\"edge\"],\n", - " cluster=\"econome\",\n", - " number=5,\n", - " flavour=\"large\"\n", - " )\n", - " .finalize()\n", - ")\n", - "\n", - "\n", - "provider = en.VMonG5k(conf)\n", - "\n", - "roles, networks = provider.init()\n", - "\n", - "roles " - ], - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[WARNING]: failed to patch stdout/stderr for fork-safety: 'OutStream' object\n", - "has no attribute 'buffer'\n", - "[WARNING]: failed to reconfigure stdout/stderr with the replace error handler:\n", - "'OutStream' object has no attribute 'reconfigure'\n", - "/Users/sidimohammedkaddour/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/urllib3/__init__.py:35: NotOpenSSLWarning: urllib3 v2 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. 
See: https://github.com/urllib3/urllib3/issues/3020\n", - " warnings.warn(\n" - ] - }, - { - "data": { - "text/plain": [ - "\u001B[31mWARNING \u001B[0m \u001B[1m[\u001B[0mErrno \u001B[1;36m2\u001B[0m\u001B[1m]\u001B[0m No such file or directory: \u001B[2m__init__.py\u001B[0m\u001B[2m:\u001B[0m\u001B[2m133\u001B[0m\n", - " \u001B[32m'/Users/sidimohammedkaddour/.python-grid5000.yaml'\u001B[0m \u001B[2m \u001B[0m\n" - ], - "text/html": [ - "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800000; text-decoration-color: #800000\">WARNING </span> <span style=\"font-weight: bold\">[</span>Errno <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">2</span><span style=\"font-weight: bold\">]</span> No such file or directory: <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">__init__.py:133</span>\n", - " <span style=\"color: #008000; text-decoration-color: #008000\">'/Users/sidimohammedkaddour/.python-grid5000.yaml'</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\"> </span>\n", - "</pre>\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "\u001B[34mINFO \u001B[0m \u001B[33m...\u001B[0mFalling back to anonymous connection \u001B[2m__init__.py\u001B[0m\u001B[2m:\u001B[0m\u001B[2m134\u001B[0m\n" - ], - "text/html": [ - "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #000080; text-decoration-color: #000080\">INFO </span> <span style=\"color: #808000; text-decoration-color: #808000\">...</span>Falling back to anonymous connection <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">__init__.py:134</span>\n", - "</pre>\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "ename": "Grid5000AuthenticationError", - "evalue": "401: <!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>401 Unauthorized</title>\n</head><body>\n<h1>Unauthorized</h1>\n<p>This server could not verify that you\nare authorized to access the document\nrequested. 
Either you supplied the wrong\ncredentials (e.g., bad password), or your\nbrowser doesn't understand how to supply\nthe credentials required.</p>\n<hr>\n<address>Apache/2.4.59 (Debian) Server at <a href=\"mailto:support-staff@lists.grid5000.fr\">api.grid5000.fr</a> Port 443</address>\n</body></html>\n", - "output_type": "error", - "traceback": [ - "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m", - "\u001B[0;31mGrid5000AuthenticationError\u001B[0m Traceback (most recent call last)", - "Cell \u001B[0;32mIn[2], line 35\u001B[0m\n\u001B[1;32m 8\u001B[0m conf \u001B[38;5;241m=\u001B[39m (\n\u001B[1;32m 9\u001B[0m en\u001B[38;5;241m.\u001B[39mVMonG5kConf\n\u001B[1;32m 10\u001B[0m \u001B[38;5;241m.\u001B[39mfrom_settings(job_name\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124menoslib_providers\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 29\u001B[0m \u001B[38;5;241m.\u001B[39mfinalize()\n\u001B[1;32m 30\u001B[0m )\n\u001B[1;32m 33\u001B[0m provider \u001B[38;5;241m=\u001B[39m en\u001B[38;5;241m.\u001B[39mVMonG5k(conf)\n\u001B[0;32m---> 35\u001B[0m roles, networks \u001B[38;5;241m=\u001B[39m \u001B[43mprovider\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43minit\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 37\u001B[0m roles \n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_vmong5k/provider.py:360\u001B[0m, in \u001B[0;36mVMonG5k.init\u001B[0;34m(self, force_deploy, start_time, **kwargs)\u001B[0m\n\u001B[1;32m 358\u001B[0m _force_deploy \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mprovider_conf\u001B[38;5;241m.\u001B[39mforce_deploy\n\u001B[1;32m 359\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mprovider_conf\u001B[38;5;241m.\u001B[39mforce_deploy \u001B[38;5;241m=\u001B[39m _force_deploy \u001B[38;5;129;01mor\u001B[39;00m force_deploy\n\u001B[0;32m--> 360\u001B[0m g5k_conf \u001B[38;5;241m=\u001B[39m \u001B[43m_build_g5k_conf\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mprovider_conf\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 361\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_g5k_provider \u001B[38;5;241m=\u001B[39m g5kprovider\u001B[38;5;241m.\u001B[39mG5k(g5k_conf)\n\u001B[1;32m 362\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m start_time:\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_vmong5k/provider.py:206\u001B[0m, in \u001B[0;36m_build_g5k_conf\u001B[0;34m(vmong5k_conf)\u001B[0m\n\u001B[1;32m 204\u001B[0m \u001B[38;5;66;03m# first of all, make sure we don't mutate the vmong5k_conf\u001B[39;00m\n\u001B[1;32m 205\u001B[0m vmong5k_conf \u001B[38;5;241m=\u001B[39m copy\u001B[38;5;241m.\u001B[39mdeepcopy(vmong5k_conf)\n\u001B[0;32m--> 206\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43m_do_build_g5k_conf\u001B[49m\u001B[43m(\u001B[49m\u001B[43mvmong5k_conf\u001B[49m\u001B[43m)\u001B[49m\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_vmong5k/provider.py:170\u001B[0m, in \u001B[0;36m_do_build_g5k_conf\u001B[0;34m(vmong5k_conf)\u001B[0m\n\u001B[1;32m 167\u001B[0m subnet_networks \u001B[38;5;241m=\u001B[39m {}\n\u001B[1;32m 169\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m _, machine \u001B[38;5;129;01min\u001B[39;00m 
\u001B[38;5;28menumerate\u001B[39m(vmong5k_conf\u001B[38;5;241m.\u001B[39mmachines):\n\u001B[0;32m--> 170\u001B[0m site \u001B[38;5;241m=\u001B[39m \u001B[43mmachine\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msite\u001B[49m\n\u001B[1;32m 171\u001B[0m \u001B[38;5;66;03m# first check if there's a prod network demand\u001B[39;00m\n\u001B[1;32m 172\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m site \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m prod_networks:\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_vmong5k/configuration.py:144\u001B[0m, in \u001B[0;36mMachineConfiguration.site\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 142\u001B[0m \u001B[38;5;129m@property\u001B[39m\n\u001B[1;32m 143\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21msite\u001B[39m(\u001B[38;5;28mself\u001B[39m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m \u001B[38;5;28mstr\u001B[39m:\n\u001B[0;32m--> 144\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mget_cluster_site\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcluster\u001B[49m\u001B[43m)\u001B[49m\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_g5k/g5k_api_utils.py:519\u001B[0m, in \u001B[0;36mget_cluster_site\u001B[0;34m(cluster)\u001B[0m\n\u001B[1;32m 510\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mget_cluster_site\u001B[39m(cluster: \u001B[38;5;28mstr\u001B[39m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m \u001B[38;5;28mstr\u001B[39m:\n\u001B[1;32m 511\u001B[0m \u001B[38;5;250m \u001B[39m\u001B[38;5;124;03m\"\"\"Get the site of a given cluster.\u001B[39;00m\n\u001B[1;32m 512\u001B[0m \n\u001B[1;32m 513\u001B[0m \u001B[38;5;124;03m Args:\u001B[39;00m\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 517\u001B[0m \u001B[38;5;124;03m The corresponding site(str)\u001B[39;00m\n\u001B[1;32m 518\u001B[0m \u001B[38;5;124;03m \"\"\"\u001B[39;00m\n\u001B[0;32m--> 519\u001B[0m match \u001B[38;5;241m=\u001B[39m \u001B[43mget_clusters_sites\u001B[49m\u001B[43m(\u001B[49m\u001B[43m[\u001B[49m\u001B[43mcluster\u001B[49m\u001B[43m]\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 520\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m match[cluster]\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_g5k/g5k_api_utils.py:506\u001B[0m, in \u001B[0;36mget_clusters_sites\u001B[0;34m(clusters)\u001B[0m\n\u001B[1;32m 497\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mget_clusters_sites\u001B[39m(clusters: Iterable[\u001B[38;5;28mstr\u001B[39m]) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Dict[\u001B[38;5;28mstr\u001B[39m, \u001B[38;5;28mstr\u001B[39m]:\n\u001B[1;32m 498\u001B[0m \u001B[38;5;250m \u001B[39m\u001B[38;5;124;03m\"\"\"Get the corresponding sites of given clusters.\u001B[39;00m\n\u001B[1;32m 499\u001B[0m \n\u001B[1;32m 500\u001B[0m \u001B[38;5;124;03m Args:\u001B[39;00m\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 504\u001B[0m \u001B[38;5;124;03m dict of corresponding to the mapping cluster -> site\u001B[39;00m\n\u001B[1;32m 505\u001B[0m \u001B[38;5;124;03m \"\"\"\u001B[39;00m\n\u001B[0;32m--> 506\u001B[0m clusters_sites \u001B[38;5;241m=\u001B[39m \u001B[43mget_all_clusters_sites\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 507\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m {c: clusters_sites[c] \u001B[38;5;28;01mfor\u001B[39;00m 
c \u001B[38;5;129;01min\u001B[39;00m clusters}\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/enoslib/infra/enos_g5k/g5k_api_utils.py:488\u001B[0m, in \u001B[0;36mget_all_clusters_sites\u001B[0;34m()\u001B[0m\n\u001B[1;32m 486\u001B[0m result: Dict \u001B[38;5;241m=\u001B[39m {}\n\u001B[1;32m 487\u001B[0m gk \u001B[38;5;241m=\u001B[39m get_api_client()\n\u001B[0;32m--> 488\u001B[0m sites \u001B[38;5;241m=\u001B[39m \u001B[43mgk\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msites\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlist\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 489\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m site \u001B[38;5;129;01min\u001B[39;00m sites:\n\u001B[1;32m 490\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m site\u001B[38;5;241m.\u001B[39muid \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m gk\u001B[38;5;241m.\u001B[39mexcluded_sites:\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/grid5000/exceptions.py:82\u001B[0m, in \u001B[0;36mon_http_error.<locals>.wrap.<locals>.wrapped_f\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 79\u001B[0m \u001B[38;5;129m@functools\u001B[39m\u001B[38;5;241m.\u001B[39mwraps(f)\n\u001B[1;32m 80\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mwrapped_f\u001B[39m(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs):\n\u001B[1;32m 81\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m---> 82\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mf\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 83\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m Grid5000HttpError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 84\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m error(e\u001B[38;5;241m.\u001B[39merror_message, e\u001B[38;5;241m.\u001B[39mresponse_code, e\u001B[38;5;241m.\u001B[39mresponse_body)\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/grid5000/mixins.py:103\u001B[0m, in \u001B[0;36mListMixin.list\u001B[0;34m(self, **kwargs)\u001B[0m\n\u001B[1;32m 101\u001B[0m \u001B[38;5;66;03m# Allow to overwrite the path, handy for custom listings\u001B[39;00m\n\u001B[1;32m 102\u001B[0m path \u001B[38;5;241m=\u001B[39m data\u001B[38;5;241m.\u001B[39mpop(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mpath\u001B[39m\u001B[38;5;124m\"\u001B[39m, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mpath)\n\u001B[0;32m--> 103\u001B[0m l_obj \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgrid5000\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhttp_list\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpath\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mdata\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 105\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(l_obj, \u001B[38;5;28mlist\u001B[39m):\n\u001B[1;32m 106\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m [\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_obj_cls(\u001B[38;5;28mself\u001B[39m, item) \u001B[38;5;28;01mfor\u001B[39;00m item \u001B[38;5;129;01min\u001B[39;00m l_obj]\n", - "File 
\u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/grid5000/__init__.py:349\u001B[0m, in \u001B[0;36mGrid5000.http_list\u001B[0;34m(self, path, query_data, **kwargs)\u001B[0m\n\u001B[1;32m 331\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Make a GET request to the Grid5000 API for list-oriented queries.\u001B[39;00m\n\u001B[1;32m 332\u001B[0m \n\u001B[1;32m 333\u001B[0m \u001B[38;5;124;03mArgs:\u001B[39;00m\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 345\u001B[0m \u001B[38;5;124;03m Grid5000ParsingError: If the json data could not be parsed\u001B[39;00m\n\u001B[1;32m 346\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[1;32m 347\u001B[0m url \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_build_url(path)\n\u001B[0;32m--> 349\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhttp_request\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mget\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mquery_data\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mquery_data\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 351\u001B[0m \u001B[38;5;66;03m# NOTE(msimonin): in the future we may want to support automatic\u001B[39;00m\n\u001B[1;32m 352\u001B[0m \u001B[38;5;66;03m# pagination Thus we'll need to return an iterator here (in a generic\u001B[39;00m\n\u001B[1;32m 353\u001B[0m \u001B[38;5;66;03m# way ...)\u001B[39;00m\n\u001B[1;32m 354\u001B[0m \n\u001B[1;32m 355\u001B[0m \u001B[38;5;66;03m# NOTE(msimonin): here we hit the HATEOAS vs non HATEOAS hell\u001B[39;00m\n\u001B[1;32m 356\u001B[0m result \u001B[38;5;241m=\u001B[39m result\u001B[38;5;241m.\u001B[39mjson()\n", - "File \u001B[0;32m~/PycharmProjects/Contract Net/.venv/lib/python3.9/site-packages/grid5000/__init__.py:278\u001B[0m, in \u001B[0;36mGrid5000.http_request\u001B[0;34m(self, verb, path, query_data, post_data, header_data, streamed, content_type, accept, **kwargs)\u001B[0m\n\u001B[1;32m 275\u001B[0m \u001B[38;5;28;01mpass\u001B[39;00m\n\u001B[1;32m 277\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m result\u001B[38;5;241m.\u001B[39mstatus_code \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m401\u001B[39m:\n\u001B[0;32m--> 278\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m Grid5000AuthenticationError(\n\u001B[1;32m 279\u001B[0m response_code\u001B[38;5;241m=\u001B[39mresult\u001B[38;5;241m.\u001B[39mstatus_code,\n\u001B[1;32m 280\u001B[0m error_message\u001B[38;5;241m=\u001B[39merror_message,\n\u001B[1;32m 281\u001B[0m response_body\u001B[38;5;241m=\u001B[39mresult\u001B[38;5;241m.\u001B[39mcontent,\n\u001B[1;32m 282\u001B[0m )\n\u001B[1;32m 284\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m Grid5000HttpError(\n\u001B[1;32m 285\u001B[0m response_code\u001B[38;5;241m=\u001B[39mresult\u001B[38;5;241m.\u001B[39mstatus_code,\n\u001B[1;32m 286\u001B[0m error_message\u001B[38;5;241m=\u001B[39merror_message,\n\u001B[1;32m 287\u001B[0m response_body\u001B[38;5;241m=\u001B[39mresult\u001B[38;5;241m.\u001B[39mcontent,\n\u001B[1;32m 288\u001B[0m )\n", - "\u001B[0;31mGrid5000AuthenticationError\u001B[0m: 401: <!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>401 
Unauthorized</title>\n</head><body>\n<h1>Unauthorized</h1>\n<p>This server could not verify that you\nare authorized to access the document\nrequested. Either you supplied the wrong\ncredentials (e.g., bad password), or your\nbrowser doesn't understand how to supply\nthe credentials required.</p>\n<hr>\n<address>Apache/2.4.59 (Debian) Server at <a href=\"mailto:support-staff@lists.grid5000.fr\">api.grid5000.fr</a> Port 443</address>\n</body></html>\n" - ] - } - ], - "execution_count": 2 - }, - { - "cell_type": "code", - "id": "1be51270-85a2-442d-9ab2-6cc8b7a5d5a4", - "metadata": {}, - "source": [ - "# wait for the nodes\n", - "en.wait_for(roles)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "1c8d37bb-748e-4716-85b9-5f4fb8290d2f", - "metadata": {}, - "source": [ - "k3s = en.K3s(master=roles[\"master\"], agent=roles[\"agent\"])\n", - "\n", - "k3s.deploy()" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "83fc9b33-859b-423b-bcdb-b6e165822a8d", - "metadata": { - "scrolled": true - }, - "source": [ - "print(\"Create a tunnel from your local machine to the head node:\")\n", - "print(f\"ssh -NL 8001:{roles['master'][0].address}:8001 skaddour@access.grid5000.fr\")" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "59e57e73-3140-4c8b-8bb5-238130e7b358", - "metadata": {}, - "source": [ - "import subprocess\n", - "subprocess.run(\"scp otel-collector-config.yaml skaddour@\"+str(roles['master'][0].address))" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "b7198604-e8c6-47b0-8f85-e7afcc505d04", - "metadata": {}, - "source": [ - "provider.destroy() " - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "76996be3-b9ae-49c4-ac68-30f1df9055dd", - "metadata": {}, - "source": [], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "d22f906b-a7ea-40fa-bd9c-af59eda93032", - "metadata": {}, - "source": [], - "outputs": [], - "execution_count": null - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/motion_detector/index.txt b/motion_detector/index.txt deleted file mode 100644 index 62f9457511f879886bb7728c986fe10b0ece6bcb..0000000000000000000000000000000000000000 --- a/motion_detector/index.txt +++ /dev/null @@ -1 +0,0 @@ -6 \ No newline at end of file diff --git a/object-detection.jpg b/object-detection.jpg deleted file mode 100644 index 714b7208da494d30e7a1f094a196a286a044d445..0000000000000000000000000000000000000000 Binary files a/object-detection.jpg and /dev/null differ diff --git a/object_recognizer/object_recognizer.py b/object_recognizer/object_recognizer.py deleted file mode 100644 index 9ca2322e6cb703217231a5ad62fe4076c1243e92..0000000000000000000000000000000000000000 --- a/object_recognizer/object_recognizer.py +++ /dev/null @@ -1,204 +0,0 @@ -import datetime -import socket -import time -import threading -import queue -import pickle -import struct -import cv2 -import numpy as np -from tracerprovider import tracer, meter -import pyshine as ps # pip install pyshine - - -# Load YOLO model 
(if needed) and other initialization -# model = YOLO('yolov8n.pt') - -# Function to detect objects in a frame -def get_output_layers(net): - layer_names = net.getLayerNames() - try: - output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()] - except: - output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()] - return output_layers - - -def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h, classes, COLORS): - label = str(classes[class_id]) - print(label + " detected") - color = COLORS[class_id] - cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) - cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) - framename = "res.jpg" - print(framename) - cv2.imwrite(framename, img) - - -def process_frame(mddata): - frame = mddata['frame'] - Width = frame.shape[1] - Height = frame.shape[0] - scale = 0.00392 - - classes = None - with open("yolov3.txt", 'r') as f: - classes = [line.strip() for line in f.readlines()] - - COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) - net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg") - blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0, 0, 0), True, crop=False) - net.setInput(blob) - outs = net.forward(get_output_layers(net)) - - class_ids = [] - confidences = [] - boxes = [] - conf_threshold = 0.5 - nms_threshold = 0.4 - - for out in outs: - for detection in out: - scores = detection[5:] - class_id = np.argmax(scores) - confidence = scores[class_id] - if confidence > 0.5: - center_x = int(detection[0] * Width) - center_y = int(detection[1] * Height) - w = int(detection[2] * Width) - h = int(detection[3] * Height) - x = center_x - w / 2 - y = center_y - h / 2 - class_ids.append(class_id) - confidences.append(float(confidence)) - boxes.append([x, y, w, h]) - - indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold) - for i in indices: - try: - box = boxes[i] - except: - i = i[0] - box = boxes[i] - x = box[0] - y = box[1] - w = box[2] - h = box[3] - draw_prediction(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h), classes, - COLORS) - - -def receive_frames(client_socket, frame_queue): - data = b"" - payload_size = struct.calcsize("Q") - - while True: - try: - while len(data) < payload_size: - packet = client_socket.recv(4 * 1024) # 4K - if not packet: - return - data += packet - packed_msg_size = data[:payload_size] - data = data[payload_size:] - if len(data) == 0: - break - msg_size = struct.unpack("Q", packed_msg_size)[0] - while len(data) < msg_size: - data += client_socket.recv(4 * 1024) - frame_data = data[:msg_size] - data = data[msg_size:] - frame_queue.put(pickle.loads(frame_data)) - len_q_gauge.set(frame_queue.qsize()) - transmission_time = datetime.datetime.now() - datetime.datetime.strptime(pickle.loads(frame_data)['sentfromedgetime'], - "%Y-%m-%d %H:%M:%S") - transmission_time_in_seconds = transmission_time.total_seconds() - md_e2c_transmission_time.set(transmission_time_in_seconds) - except Exception as e: - print(f"Error receiving frame: {e}") - return - - -def handle_client(frame_queue): - firstFrame = None - fps_start_time = time.time() - fps_frame_count = 0 - detected = False - orclient_socket = None - - - - while True: - if frame_queue.empty(): - continue - - mddata = frame_queue.get() - len_q_gauge.set(frame_queue.qsize()) - frame = mddata['frame'] - Width = frame.shape[1] - Height = frame.shape[0] - - starting_processing_time = datetime.datetime.now() - transmission_time = 
datetime.datetime.now() - datetime.datetime.strptime(mddata['sentfromedgetime'], - "%Y-%m-%d %H:%M:%S") - transmission_time_in_seconds = transmission_time.total_seconds() - #md_e2c_transmission_time.set(transmission_time_in_seconds) - - - # Process the frame - process_frame(mddata) - time_tmp=datetime.datetime.now()-starting_processing_time - time_tmp=time_tmp.total_seconds() - processing_time.set(time.time() - time_tmp) - response_time_s = datetime.datetime.now() - datetime.datetime.strptime(mddata['capture_time'], - "%Y-%m-%d %H:%M:%S") - response_time_s_in_seconds = response_time_s.total_seconds() - response_time.set(response_time_s_in_seconds) - - -# Setup server socket -server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -host_name = socket.gethostname() -host_ip = socket.gethostbyname(host_name) -socket_address = (host_ip, 9999) -server_socket.bind(socket_address) -server_socket.listen() -print("Listening at", socket_address) -len_q_gauge = meter.create_gauge( - name="or_len_q", - description="queue length in object recignizer", - unit="f" - ) -processing_time = meter.create_gauge( - name="or_processing_time", - description="processing time of one frame", - unit="s" -) -md_e2c_transmission_time = meter.create_gauge( - name="md_e2c_transmission_time", - description="transmission time of one frame from edge to cloud", - unit="s" -) -response_time = meter.create_gauge( - name="responstime", - description="response time of one frame", - unit="s" -) -# Main server loop -client_threads = [] -frame_queue = queue.Queue() -process_thread=[] -for i in range(0,1): - process_thread.append( threading.Thread(target=handle_client, args=(frame_queue,))) - process_thread[-1].start() -while True: - try: - client_socket, addr = server_socket.accept() - print(f"CLIENT {addr} CONNECTED!") - receive_frames(client_socket, frame_queue) - - except Exception as e: - print(f"Exception: {e}") - continue - -server_socket.close() diff --git a/object_recognizer/res.jpg b/object_recognizer/res.jpg deleted file mode 100644 index 0008406a673a335ae7528163f239c6b76b74ad04..0000000000000000000000000000000000000000 Binary files a/object_recognizer/res.jpg and /dev/null differ diff --git a/object_recognizer/yolo_opencv.py b/object_recognizer/yolo_opencv.py deleted file mode 100644 index 25d3e901744275edcc322357612e6f2e281e1891..0000000000000000000000000000000000000000 --- a/object_recognizer/yolo_opencv.py +++ /dev/null @@ -1,101 +0,0 @@ -############################################# -# Object detection - YOLO - OpenCV -# Author : Arun Ponnusamy (July 16, 2018) -# Website : http://www.arunponnusamy.com -############################################ - - -import cv2 -import argparse -import numpy as np - - - - -def get_output_layers(net): - - layer_names = net.getLayerNames() - try: - output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()] - except: - output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()] - - return output_layers - - -def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h): - - label = str(classes[class_id]) - - color = COLORS[class_id] - - cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2) - - cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) - - -image = cv2.imread("../dog.jpg") - -Width = image.shape[1] -Height = image.shape[0] -scale = 0.00392 - -classes = None - -with open("yolov3.txt", 'r') as f: - classes = [line.strip() for line in f.readlines()] - -COLORS = np.random.uniform(0, 255, 
size=(len(classes), 3)) - -net = cv2.dnn.readNet("yolov3.weights","yolov3.cfg") - -blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False) - -net.setInput(blob) - -outs = net.forward(get_output_layers(net)) - -class_ids = [] -confidences = [] -boxes = [] -conf_threshold = 0.5 -nms_threshold = 0.4 - - -for out in outs: - for detection in out: - scores = detection[5:] - class_id = np.argmax(scores) - confidence = scores[class_id] - if confidence > 0.5: - center_x = int(detection[0] * Width) - center_y = int(detection[1] * Height) - w = int(detection[2] * Width) - h = int(detection[3] * Height) - x = center_x - w / 2 - y = center_y - h / 2 - class_ids.append(class_id) - confidences.append(float(confidence)) - boxes.append([x, y, w, h]) - - -indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold) - -for i in indices: - try: - box = boxes[i] - except: - i = i[0] - box = boxes[i] - - x = box[0] - y = box[1] - w = box[2] - h = box[3] - draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h)) - -cv2.imshow("object detection", image) -cv2.waitKey() - -cv2.imwrite("../object-detection.jpg", image) -cv2.destroyAllWindows() diff --git a/rules.yml b/rules.yml deleted file mode 100644 index 25cd29e4535b73d688ecf36ae24165670016ea43..0000000000000000000000000000000000000000 --- a/rules.yml +++ /dev/null @@ -1,5 +0,0 @@ -groups: - - name: example - rules: - - record: container_cpu_usage_rate_per_second - expr: rate(container_cpu_usage_seconds_total[1s]) diff --git a/deploy on k3s.py b/services/__init__.py similarity index 100% rename from deploy on k3s.py rename to services/__init__.py diff --git a/camera/Dockerfile b/services/camera/Dockerfile similarity index 56% rename from camera/Dockerfile rename to services/camera/Dockerfile index 3c1e4976f64fe27b752999db7f8ffa5009dca763..092db59c37d1662ca79e9f5c08db39ecc4ed556d 100644 --- a/camera/Dockerfile +++ b/services/camera/Dockerfile @@ -1,7 +1,5 @@ -# Dockerfile for client microservice - # Use the official Python image as base -FROM --platform=linux/amd64 python:3.8-slim as build +FROM --platform=linux/arm64 python:3.8-slim as build # Set the working directory inside the container WORKDIR /app @@ -12,8 +10,15 @@ COPY . . 
# Install required Python packages RUN pip install -r requirements.txt +# Set environment variables with default values +ENV CAMERA="false" \ + ANIMAL_NAME="tiger" \ + APPEARANCE_RATE=600 \ + MDHOST="localhost" \ + MDPORT=9998 + # Expose the port (if needed) # EXPOSE <port_number> # Command to run the client code -CMD ["python", "camera.py"] +CMD ["python", "src/camera.py"] diff --git a/services/camera/__init__.py b/services/camera/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ace3a2ae3c5c821d3268333c1db331cdc3dbdc --- /dev/null +++ b/services/camera/__init__.py @@ -0,0 +1 @@ +# Empty __init__.py \ No newline at end of file diff --git a/camera/footage/bear/no_bear.mp4 b/services/camera/footage/bear/no_bear.mp4 similarity index 100% rename from camera/footage/bear/no_bear.mp4 rename to services/camera/footage/bear/no_bear.mp4 diff --git a/camera/footage/bear/with_bear.mp4 b/services/camera/footage/bear/with_bear.mp4 similarity index 100% rename from camera/footage/bear/with_bear.mp4 rename to services/camera/footage/bear/with_bear.mp4 diff --git a/camera/footage/tiger/no_tiger.mp4 b/services/camera/footage/tiger/no_tiger.mp4 similarity index 100% rename from camera/footage/tiger/no_tiger.mp4 rename to services/camera/footage/tiger/no_tiger.mp4 diff --git a/camera/footage/tiger/with_tiger.mp4 b/services/camera/footage/tiger/with_tiger.mp4 similarity index 100% rename from camera/footage/tiger/with_tiger.mp4 rename to services/camera/footage/tiger/with_tiger.mp4 diff --git a/camera/footage/wolf/no_wolf.mp4 b/services/camera/footage/wolf/no_wolf.mp4 similarity index 100% rename from camera/footage/wolf/no_wolf.mp4 rename to services/camera/footage/wolf/no_wolf.mp4 diff --git a/camera/footage/wolf/with_wolf.mp4 b/services/camera/footage/wolf/with_wolf.mp4 similarity index 100% rename from camera/footage/wolf/with_wolf.mp4 rename to services/camera/footage/wolf/with_wolf.mp4 diff --git a/camera/requirements.txt b/services/camera/requirements.txt similarity index 96% rename from camera/requirements.txt rename to services/camera/requirements.txt index 14671f87e0bd27bc2ddefc1f9a7887eb6ab8c5a8..109b87e46f96b2c9ff740f7fc7b5e8d4ddbb8b4a 100644 --- a/camera/requirements.txt +++ b/services/camera/requirements.txt @@ -1,5 +1,6 @@ opencv-python-headless imutils +psutil pyshine opentelemetry-api opentelemetry-sdk diff --git a/services/camera/src/__init__.py b/services/camera/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/camera/camera.py b/services/camera/src/camera.py similarity index 66% rename from camera/camera.py rename to services/camera/src/camera.py index 261106d7f67ef9f62bb2c6db451c441106cfe124..2158f1435caa529469bc72b1e792614acd7063e3 100644 --- a/camera/camera.py +++ b/services/camera/src/camera.py @@ -6,11 +6,21 @@ import pickle import struct import time import datetime +import os # Import os for environment variables from tracerprovider import tracer, meter -from array import array - import pyshine as ps # pip install pyshine import imutils # pip install imutils +import logging + +# Configure logging +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', + level=logging.INFO, # You can change this to DEBUG, ERROR, etc. 
+ handlers=[ + logging.StreamHandler(), # Log to console + logging.FileHandler("output/camera.log") # Log to a file + ] +) def generate_random_intervals(events_per_hour): @@ -29,38 +39,43 @@ def generate_random_intervals(events_per_hour): # Return intervals rounded to a few decimal points (optional) result = [round(interval, 2) for interval in normalized_intervals] - print(str(result)) + logging.info(f"Generated intervals: {result}") return result -def main(camera=False, animal_name="bear", frequency=0, host_ip="motion_detector", port=9998): +def main(): + # Get environment variables (with defaults if not set) + camera = os.getenv("CAMERA", "false").lower() == "true" + animal_name = os.getenv("ANIMAL_NAME", "tiger") + appearance_rate = int(os.getenv("APPEARANCE_RATE", 600)) + host_ip = os.getenv("MDHOST", "localhost") + port = int(os.getenv("MDPORT", 9998)) + # Map animal to the appropriate video filenames animal_map = { 'bear': ('footage/bear/no_bear.mp4', 'footage/bear/with_bear.mp4'), 'tiger': ('footage/tiger/no_tiger.mp4', 'footage/tiger/with_tiger.mp4'), 'wolf': ('footage/wolf/no_wolf.mp4', 'footage/wolf/with_wolf.mp4') } - if animal_name: - if animal_name not in animal_map: - print(f"No video available for {animal_name}") - return + + if animal_name not in animal_map: + logging.error(f"No video available for {animal_name}") + return + no_animal_video, with_animal_video = animal_map[animal_name] while True: - client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - host_ip = host_ip # Here according to your server IP write the address - port = port while True: try: - print(host_ip+":"+str(port)) + logging.info(f"Attempting to connect to {host_ip}:{port}") client_socket.connect((host_ip, port)) - + logging.info(f"Connected to {host_ip}:{port}") break - except: + except Exception as e: + logging.warning(f"Cannot connect to motion detector: {e}") time.sleep(1) - print("cannot connect to motion detector") continue # Initialize FPS calculation @@ -80,36 +95,30 @@ def main(camera=False, animal_name="bear", frequency=0, host_ip="motion_detector frame_rate = 30.0 frame_interval = 1.0 / frame_rate - fps_start_time=time.time() - motion=False + motion = False if client_socket: - while True: - for interval in generate_random_intervals(frequency) : + for interval in generate_random_intervals(appearance_rate): interval_frame_count = interval * 30 frame_number = 0 - if motion: + if motion: vid = cv2.VideoCapture(with_animal_video) motion = False else: vid = cv2.VideoCapture(no_animal_video) motion = True - print(motion) - while (frame_number<interval_frame_count) or (not motion): - # if not vid.isOpened() and not motion: - # vid= cv2.VideoCapture(no_animal_video) - frame_number+=1 + logging.info(f"Motion: {motion}") + while (frame_number < interval_frame_count) or (not motion): + frame_number += 1 with tracer.start_as_current_span("sending frame") as span: try: - - img, frame = vid.read() - if not img : + if not img: if motion: - vid= cv2.VideoCapture(no_animal_video) + vid = cv2.VideoCapture(no_animal_video) continue else: - print("motion frames count:"+str(frame_number)) + logging.info(f"Motion frames count: {frame_number}") break frame = imutils.resize(frame, width=640) @@ -130,7 +139,7 @@ def main(camera=False, animal_name="bear", frequency=0, host_ip="motion_detector elapsed_time = time.time() - fps_start_time if elapsed_time >= 10.0: fps = round(fps_frame_count / elapsed_time, 2) - print(str(fps) + "/" + str(fps_frame_count) + "/" + str(elapsed_time)) + logging.info(f"FPS: {fps} 
(Total frames: {fps_frame_count}, Time: {elapsed_time})") fps_histo.record(fps) fps_count.set(fps) fps_frame_count = 0 @@ -140,21 +149,13 @@ def main(camera=False, animal_name="bear", frequency=0, host_ip="motion_detector end_time = time.time() elapsed_time = end_time - start_time if elapsed_time < frame_interval: - #print(str(frame_interval-elapsed_time)) - time.sleep(0.025)#frame_interval - elapsed_time) + time.sleep(0.025) # frame_interval - elapsed_time except Exception as e: - print(f"Error sending frame: {e}") + logging.error(f"Error sending frame: {e}") client_socket.close() break if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Camera") - parser.add_argument("--camera", action="store_true", help="Use webcam") - parser.add_argument("--animal", type=str, default='tiger', choices=['bear', 'tiger', 'wolf'], help="Name of the animal") - parser.add_argument("--frequency", type=int, default=600, help="frequency for animal appearance in one hour") - parser.add_argument("--mdhost", type=str, default="motion_detector", help="Motion Detector IP address") - parser.add_argument("--mdport", type=int, default=9998, help="Motion detector port number") - args = parser.parse_args() - main(camera=args.camera, animal_name=args.animal, frequency=args.frequency, host_ip=args.mdhost, port=args.mdport) \ No newline at end of file + main() diff --git a/services/camera/src/config.py b/services/camera/src/config.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbbafbc10cd780928fc7bfcddcb81b70feaa7ab --- /dev/null +++ b/services/camera/src/config.py @@ -0,0 +1,11 @@ +import os + +def get_config(): + config = { + 'camera': os.getenv("CAMERA", "false").lower() == "true", + 'animal_name': os.getenv("ANIMAL_NAME", "tiger"), + 'appearance_rate': max(1, int(os.getenv("APPEARANCE_RATE", 600))), + 'host_ip': os.getenv("MDHOST", "localhost"), + 'port': int(os.getenv("MDPORT", 9998)) + } + return config diff --git a/services/camera/src/logger.py b/services/camera/src/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..0791e3daaf2a054259ba572a3e2e448af51a897f --- /dev/null +++ b/services/camera/src/logger.py @@ -0,0 +1,19 @@ +import logging +import os + +def setup_logger(): + # Get the log file path from the environment variable, defaulting to "output/camera.log" if not set + log_file = os.getenv("LOG_FILE", "output/camera.log") + + # Create the log directory if it doesn't exist + os.makedirs(os.path.dirname(log_file), exist_ok=True) + + logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', + level=logging.INFO, + handlers=[ + logging.StreamHandler(), # Log to console + logging.FileHandler(log_file) # Log to file defined by the environment variable + ] + ) + return logging.getLogger() diff --git a/services/camera/src/network.py b/services/camera/src/network.py new file mode 100644 index 0000000000000000000000000000000000000000..576df37345d168bbd6eb4c5c72f1ce86d7c1099e --- /dev/null +++ b/services/camera/src/network.py @@ -0,0 +1,27 @@ +import socket +import pickle +import struct +import time +import logging + +def connect_to_server(host_ip, port, max_retries=5): + client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + for retry in range(max_retries): + try: + logging.info(f"Attempting to connect to {host_ip}:{port}") + client_socket.connect((host_ip, port)) + logging.info(f"Connected to {host_ip}:{port}") + return client_socket + except Exception as e: + if retry == max_retries - 1: + logging.error(f"Failed to 
connect to motion detector: {e}") + return None + wait_time = 2 ** retry + logging.warning(f"Connection failed. Retrying in {wait_time} seconds...") + time.sleep(wait_time) + return None + +def send_frame_to_server(client_socket, frame_data): + serialized_data = pickle.dumps(frame_data) + message = struct.pack("Q", len(serialized_data)) + serialized_data + client_socket.sendall(message) diff --git a/camera/tracerprovider.py b/services/camera/src/tracerprovider.py similarity index 84% rename from camera/tracerprovider.py rename to services/camera/src/tracerprovider.py index de011f54e8a95d405590e5a90889e3a7fd136965..784a8a42609db39b1a0ced93206a52028f37295c 100644 --- a/camera/tracerprovider.py +++ b/services/camera/src/tracerprovider.py @@ -1,3 +1,4 @@ +import os import random from opentelemetry import trace, metrics @@ -9,12 +10,13 @@ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExport from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter file_name = "Camera " -try: - with open('/app/index.txt', 'r') as file: - index_value = int(file.read().strip()) -except (FileNotFoundError, ValueError) as e: - print(f"Error reading index.txt: {e}") - index_value = random.randint(1, 10) +# try: +# with open('../index.txt', 'r') as file: +# index_value = int(file.read().strip()) +# except (FileNotFoundError, ValueError) as e: +# print(f"Error reading index.txt: {e}") +# index_value = random.randint(1, 10) +index_value = os.environ.get("INDEX", str(random.randint(1, 10))) # Generate a random integer resource=Resource.create({SERVICE_NAME: f"{file_name}{index_value}"}) # Set up the tracer provider for tracing diff --git a/services/camera/src/utils.py b/services/camera/src/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2d07d613bb68cb99fa1388aefff9653b42e7a64f --- /dev/null +++ b/services/camera/src/utils.py @@ -0,0 +1,12 @@ +import random +import logging + +def generate_random_intervals(events_per_hour): + total_time = 3600 # seconds in an hour + avg_interval = total_time / events_per_hour + random_factors = [random.uniform(0.5, 1.5) for _ in range(events_per_hour)] + total_factor = sum(random_factors) + normalized_intervals = [avg_interval * (factor / total_factor) for factor in random_factors] + result = [round(interval, 2) for interval in normalized_intervals] + logging.info(f"Generated intervals: {result}") + return result \ No newline at end of file diff --git a/services/camera/src/video.py b/services/camera/src/video.py new file mode 100644 index 0000000000000000000000000000000000000000..6921424edc96403a2954d053395de1d679f033d4 --- /dev/null +++ b/services/camera/src/video.py @@ -0,0 +1,21 @@ +import cv2 +import logging + +def load_video(animal_name): + video_map = { + 'bear': ('footage/bear/no_bear.mp4', 'footage/bear/with_bear.mp4'), + 'tiger': ('footage/tiger/no_tiger.mp4', 'footage/tiger/with_tiger.mp4'), + 'wolf': ('footage/wolf/no_wolf.mp4', 'footage/wolf/with_wolf.mp4') + } + if animal_name not in video_map: + logging.error(f"No video available for {animal_name}") + return None, None + return video_map[animal_name] + +def capture_frame(video_path): + vid = cv2.VideoCapture(video_path) + ret, frame = vid.read() + if not ret: + logging.warning(f"Frame read failed for {video_path}") + return None + return frame diff --git a/object_recognizer/Dockerfile b/services/motion_detector/Dockerfile similarity index 65% rename from object_recognizer/Dockerfile rename 
to services/motion_detector/Dockerfile index 65c2ff27116daa9baa590a99b06ef9a8d8447119..4f19877e62e813695af195955a3b5fae9e4324c0 100644 --- a/object_recognizer/Dockerfile +++ b/services/motion_detector/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile for server microservice # Use the official Python image as base -FROM --platform=linux/amd64 python:3.8-slim as build +FROM --platform=linux/arm64 python:3.8-slim as build # Set the working directory inside the container WORKDIR /app @@ -9,13 +9,17 @@ WORKDIR /app # Copy the server code into the container COPY . . - # Install required Python packages -RUN ls RUN pip install -r requirements.txt +# Set environment variables +ENV OR_HOST_IP=localhost +ENV OR_PORT=9999 +ENV LOG_LEVEL=INFO +ENV TZ=UTC + # Expose the port used by the server (if needed) # EXPOSE <port_number> # Command to run the server code -CMD ["python", "object_recognizer.py"] +CMD ["python", "src/motion_detection.py"] diff --git a/services/motion_detector/__init__.py b/services/motion_detector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/motion_detector/requirements.txt b/services/motion_detector/requirements.txt similarity index 100% rename from motion_detector/requirements.txt rename to services/motion_detector/requirements.txt diff --git a/services/motion_detector/src/__init__.py b/services/motion_detector/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/motion_detector/motion_detection.py b/services/motion_detector/src/motion_detection.py similarity index 61% rename from motion_detector/motion_detection.py rename to services/motion_detector/src/motion_detection.py index c8d43c76dd41276ee1acebb631c65df9a6644bbf..667d52afc2d199bd1a62455e3935fcac15f58a71 100644 --- a/motion_detector/motion_detection.py +++ b/services/motion_detector/src/motion_detection.py @@ -1,4 +1,5 @@ -import argparse +# Add logging for key events and metrics +import os import datetime import socket import cv2 @@ -8,21 +9,34 @@ import time import threading import queue import psutil -import pyshine as ps import imutils +import logging # Import the logging module from tracerprovider import tracer, meter +# Configure logging +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', + level=logging.INFO, # You can change this to DEBUG, ERROR, etc. 
+ handlers=[ + logging.StreamHandler(), # Log to console + logging.FileHandler("output/motion_detector.log") # Log to a file + ] +) + def get_cpu_usage(): - return psutil.cpu_percent(interval=1) + usage = psutil.cpu_percent(interval=1) + logging.debug(f"CPU usage retrieved: {usage}%") + return usage def receive_frames(client_socket, frame_queue): + logging.info("Starting frame reception thread.") data = b"" payload_size = struct.calcsize("Q") c2e_transmission_time = meter.create_gauge( name="c2e_transmission_time", - description="transmission time of one frame frm camera to md", + description="transmission time of one frame from camera to motion detection", unit="s" ) while True: @@ -30,32 +44,40 @@ def receive_frames(client_socket, frame_queue): while len(data) < payload_size: packet = client_socket.recv(4 * 1024) # 4K if not packet: + logging.warning("Client disconnected or no data received.") return data += packet packed_msg_size = data[:payload_size] data = data[payload_size:] msg_size = struct.unpack("Q", packed_msg_size)[0] + logging.debug(f"Expected frame size: {msg_size} bytes.") + while len(data) < msg_size: data += client_socket.recv(4 * 1024) frame_data = data[:msg_size] data = data[msg_size:] + transmission_time = datetime.datetime.now() - datetime.datetime.strptime( pickle.loads(frame_data)['capture_time'], "%Y-%m-%d %H:%M:%S") transmission_time_in_seconds = transmission_time.total_seconds() c2e_transmission_time.set(transmission_time_in_seconds) + logging.info(f"Frame received with transmission time: {transmission_time_in_seconds} seconds.") + frame_queue.put(pickle.loads(frame_data)) except Exception as e: - print(f"Error receiving frame: {e}") + logging.error(f"Error receiving frame: {e}") return + def process_frames(addr, frame_queue, or_host_ip, or_port): + logging.info(f"Starting frame processing thread for client: {addr}.") firstFrame = None fps_start_time = time.time() fps_frame_count = 0 detected = False - orclient_socket = None + orclient_socket = None fps_histo = meter.create_histogram( name="md_fps_histo", description="Frames per second", @@ -68,38 +90,43 @@ def process_frames(addr, frame_queue, or_host_ip, or_port): ) processing_time = meter.create_gauge( name="md_processing_time", - description="processing time of one frame", + description="Processing time of one frame", unit="s" ) md_detected_motion = meter.create_gauge( name="md_detected_motion", - description="detected motions 1 or 0", + description="Detected motions 1 or 0", ) - fps_start_time=time.time() + fps_start_time = time.time() while True: - if frame_queue.empty(): + try: + # Wait for an item in the queue with a timeout + recieveddata = frame_queue.get(timeout=1) # Waits for 1 second + logging.debug("Frame dequeued for processing.") + except queue.Empty: + logging.info(f"Queue is empty for {addr}, waiting for frames...") continue - recieveddata = frame_queue.get() frame = recieveddata['frame'] + logging.debug("Frame loaded for processing.") text = f"CLIENT: {addr}" - frame = ps.putBText(frame, text, 10, 10, vspace=10, hspace=1, font_scale=0.7, - background_RGB=(255, 0, 0), text_RGB=(255, 250, 250)) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) + logging.debug("Frame converted to grayscale and blurred.") if firstFrame is None: firstFrame = gray + logging.info("Initialized reference frame for motion detection.") continue - starting_processsing_time = time.time() + starting_processing_time = time.time() fps_frame_count += 1 elapsed_time = time.time() - 
fps_start_time if elapsed_time >= 10.0: fps = round(fps_frame_count / elapsed_time, 2) fps_count.set(fps) - print("FPS:", fps) + logging.info(f"FPS: {fps}") fps_frame_count = 0 fps_start_time = time.time() @@ -114,18 +141,16 @@ def process_frames(addr, frame_queue, or_host_ip, or_port): detected = False for c in cnts: if cv2.contourArea(c) < 10000: - #if detected: - # orclient_socket.close() - # detected = False continue - detected=True + detected = True detected_cnt += 1 + logging.debug(f"Motion detected. Contour area: {cv2.contourArea(c)}.") (x, y, w, h) = cv2.boundingRect(c) cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) text = "md: motion detected" - if detected: - print("Motion detected") + if detected: + logging.info("Motion detected, preparing to send frame to Object Recognizer.") md_detected_motion.set(1) orclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) orclient_socket.connect((or_host_ip, or_port)) @@ -138,42 +163,43 @@ def process_frames(addr, frame_queue, or_host_ip, or_port): try: a = pickle.dumps(ordata) message = struct.pack("Q", len(a)) + a - print("Sending frame to object_recognizer") + logging.info(f"Packet size: {len(message)} bytes.") # Log the size of the packet orclient_socket.sendall(message) + logging.info("Frame successfully sent to Object Recognizer.") break - except : - print("Sending frame to object_recognizer failed") + except Exception as e: + logging.error(f"Sending frame to Object Recognizer failed: {e}") time.sleep(1) continue orclient_socket.close() - else : - print("No motion detected") md_detected_motion.set(0) - processing_time.set(time.time() - starting_processsing_time) + processing_time.set(time.time() - starting_processing_time) + logging.debug("Processing time logged for frame.") + +def main(): + # Retrieve environment variables instead of command-line arguments + or_host_ip = os.getenv('OR_HOST', 'localhost') # Default to 'localhost' + or_port = int(os.getenv('OR_PORT', 9999)) # Default to 9999 -def main(or_host_ip=None, or_port=None): server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) host_name = socket.gethostname() - host_ip =socket.gethostbyname(host_name) - print('HOST IP:', host_ip) + host_ip = socket.gethostbyname(host_name) + logging.info(f'HOST IP: {host_ip}') port = 9998 socket_address = (host_ip, port) server_socket.bind(socket_address) server_socket.listen() - print("Listening at", socket_address) + logging.info(f"Listening at {socket_address}") while True: client_socket, addr = server_socket.accept() + logging.info(f"Accepted connection from {addr}.") frame_queue = queue.Queue() threading.Thread(target=receive_frames, args=(client_socket, frame_queue)).start() threading.Thread(target=process_frames, args=(addr, frame_queue, or_host_ip, or_port)).start() if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Motion detector") - parser.add_argument("--host", type=str, default="object_recognizer", help="Object Recognizer IP address") - parser.add_argument("--port", type=int, default=9999, help="Object Recognizer port number") - args = parser.parse_args() - main(or_host_ip=args.host, or_port=args.port) + main() diff --git a/motion_detector/tracerprovider.py b/services/motion_detector/src/tracerprovider.py similarity index 84% rename from motion_detector/tracerprovider.py rename to services/motion_detector/src/tracerprovider.py index 095d95d0028afd12fb65be60ae02586f7ec9caed..68cb24d89702fe33fa301c44edbc0531e7fc0d03 100644 --- a/motion_detector/tracerprovider.py +++ 
b/services/motion_detector/src/tracerprovider.py @@ -1,3 +1,4 @@ +import os import random from opentelemetry import trace, metrics @@ -10,12 +11,13 @@ from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter # Extract the file name without extension file_name = "Motion Detector " -try: - with open('/app/index.txt', 'r') as file: - index_value = int(file.read().strip()) -except (FileNotFoundError, ValueError) as e: - print(f"Error reading index.txt: {e}") - index_value = random.randint(1, 10) +# try: +# with open('../index.txt', 'r') as file: +# index_value = int(file.read().strip()) +# except (FileNotFoundError, ValueError) as e: +# print(f"Error reading index.txt: {e}") +# index_value = random.randint(1, 10) +index_value = os.environ.get("INDEX", str(random.randint(1, 10))) resource=Resource.create({SERVICE_NAME: f"{file_name}{index_value}"}) # Set up the tracer provider for tracing trace.set_tracer_provider( diff --git a/motion_detector/Dockerfile b/services/object_recognizer/Dockerfile similarity index 56% rename from motion_detector/Dockerfile rename to services/object_recognizer/Dockerfile index 4f17978432bdda3ea1c02e58911871929d837f46..62fd939e1897adad8b65df34206ae6370303e00a 100644 --- a/motion_detector/Dockerfile +++ b/services/object_recognizer/Dockerfile @@ -1,7 +1,5 @@ -# Dockerfile for server microservice - # Use the official Python image as base -FROM --platform=linux/amd64 python:3.8-slim as build +FROM --platform=linux/arm64 python:3.8-slim as build # Set the working directory inside the container WORKDIR /app @@ -12,8 +10,8 @@ COPY . . # Install required Python packages RUN pip install -r requirements.txt -# Expose the port used by the server (if needed) -# EXPOSE <port_number> +# Expose port 5000 +EXPOSE 5000 # Command to run the server code -CMD ["python", "motion_detection.py"] +CMD ["python", "src/object_recognizer.py"] diff --git a/services/object_recognizer/__init__.py b/services/object_recognizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/services/object_recognizer/output/res.jpg b/services/object_recognizer/output/res.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d063447abd224208d2d26a0af9ac62951ba85cd2 Binary files /dev/null and b/services/object_recognizer/output/res.jpg differ diff --git a/object_recognizer/requirements.txt b/services/object_recognizer/requirements.txt similarity index 93% rename from object_recognizer/requirements.txt rename to services/object_recognizer/requirements.txt index 94a7282c4a5938495c667c38d1f36f0927fbe007..2628b9976f1bf091d34c6d4ca95cf2b597c8b57a 100644 --- a/object_recognizer/requirements.txt +++ b/services/object_recognizer/requirements.txt @@ -6,4 +6,5 @@ opentelemetry-sdk opentelemetry-exporter-jaeger opentelemetry-exporter-otlp opentelemetry-exporter-otlp-proto-grpc -docker \ No newline at end of file +docker +flask \ No newline at end of file diff --git a/services/object_recognizer/src/__init__.py b/services/object_recognizer/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/services/object_recognizer/src/model/model.py b/services/object_recognizer/src/model/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c909a7054ad05f6e6c3219bb6902858f8def2e6b --- /dev/null +++ b/services/object_recognizer/src/model/model.py @@ 
-0,0 +1,88 @@ +import logging +import cv2 +import numpy as np +import os +def get_output_layers(net): + layer_names = net.getLayerNames() + try: + output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()] + except Exception as e: + logging.error(f"Error getting output layers: {e}") + output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()] + return output_layers + +def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h, classes, COLORS): + try: + label = str(classes[class_id]) + logging.info(f"{label} detected with confidence {confidence:.2f}") + color = COLORS[class_id] + cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) + cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) + framename = "output/res.jpg" + cv2.imwrite(framename, img) + logging.info(f"Frame saved with detection: {framename}") + except Exception as e: + logging.error(f"An error occurred while drawing prediction: {e}") + + +def recognize(image): + Width = image.shape[1] + Height = image.shape[0] + scale = 0.00392 + + classes = None + # Define the base path for the yolov3 files + base_path = "src/model/yolov3" + + # Use os.path.join to build the paths + yolov3_txt_path = os.path.join(base_path, "yolov3.txt") + yolov3_weights_path = os.path.join(base_path, "yolov3.weights") + yolov3_cfg_path = os.path.join(base_path, "yolov3.cfg") + + # Load the class names and network configuration + with open(yolov3_txt_path, 'r') as f: + classes = [line.strip() for line in f.readlines()] + + COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) + + COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) + net = cv2.dnn.readNet(yolov3_weights_path, yolov3_cfg_path) + blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False) + net.setInput(blob) + outs = net.forward(get_output_layers(net)) + + class_ids = [] + confidences = [] + boxes = [] + conf_threshold = 0.5 + nms_threshold = 0.4 + + for out in outs: + for detection in out: + scores = detection[5:] + class_id = np.argmax(scores) + confidence = scores[class_id] + if confidence > 0.5: + center_x = int(detection[0] * Width) + center_y = int(detection[1] * Height) + w = int(detection[2] * Width) + h = int(detection[3] * Height) + x = center_x - w / 2 + y = center_y - h / 2 + class_ids.append(class_id) + confidences.append(float(confidence)) + boxes.append([x, y, w, h]) + + indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold) + for i in indices: + try: + box = boxes[i] + except: + i = i[0] + box = boxes[i] + x = box[0] + y = box[1] + w = box[2] + h = box[3] + draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h), classes, + COLORS) diff --git a/object_recognizer/yolov3.cfg b/services/object_recognizer/src/model/yolov3/yolov3.cfg similarity index 100% rename from object_recognizer/yolov3.cfg rename to services/object_recognizer/src/model/yolov3/yolov3.cfg diff --git a/object_recognizer/yolov3.txt b/services/object_recognizer/src/model/yolov3/yolov3.txt similarity index 100% rename from object_recognizer/yolov3.txt rename to services/object_recognizer/src/model/yolov3/yolov3.txt diff --git a/object_recognizer/yolov3.weights b/services/object_recognizer/src/model/yolov3/yolov3.weights similarity index 100% rename from object_recognizer/yolov3.weights rename to services/object_recognizer/src/model/yolov3/yolov3.weights diff --git a/object_recognizer/yolov8n.pt 
b/services/object_recognizer/src/model/yolov3/yolov8n.pt similarity index 100% rename from object_recognizer/yolov8n.pt rename to services/object_recognizer/src/model/yolov3/yolov8n.pt diff --git a/services/object_recognizer/src/object_recognizer.py b/services/object_recognizer/src/object_recognizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c906ddfc31ff879c43089430a93e807d7ba89449 --- /dev/null +++ b/services/object_recognizer/src/object_recognizer.py @@ -0,0 +1,199 @@ +import datetime +import socket +import time +import threading +import queue +import pickle +import struct +import cv2 +import numpy as np +import logging + +from model.model import recognize +from webserver import run_flask +from tracerprovider import tracer, meter + +# Configure logging +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', + level=logging.INFO, # Change to DEBUG for more detailed logs + handlers=[ + logging.StreamHandler(), # Log to console + logging.FileHandler("output/object_recognizer.log") # Log to a file + ] +) + + +# Function to detect objects in a frame +def get_output_layers(net): + layer_names = net.getLayerNames() + try: + output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()] + except Exception as e: + logging.error(f"Error getting output layers: {e}") + output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()] + return output_layers + + +def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h, classes, COLORS): + try: + label = str(classes[class_id]) + logging.info(f"{label} detected with confidence {confidence:.2f}") + color = COLORS[class_id] + cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) + cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) + framename = "output/res.jpg" + cv2.imwrite(framename, img) + logging.info(f"Frame saved with detection: {framename}") + except Exception as e: + logging.error(f"An error occurred while drawing prediction: {e}") + + +def process_frame(mddata): + recognize(mddata['frame']) + # frame = mddata['frame'] + # Width = frame.shape[1] + # Height = frame.shape[0] + # scale = 0.00392 + # + # classes = None + # with open("src/yolov3.txt", 'r') as f: + # classes = [line.strip() for line in f.readlines()] + # + # COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) + # net = cv2.dnn.readNet("src/yolov3.weights", "src/yolov3.cfg") + # blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0, 0, 0), True, crop=False) + # net.setInput(blob) + # outs = net.forward(get_output_layers(net)) + # + # class_ids = [] + # confidences = [] + # boxes = [] + # conf_threshold = 0.5 + # nms_threshold = 0.4 + # + # for out in outs: + # for detection in out: + # scores = detection[5:] + # class_id = np.argmax(scores) + # confidence = scores[class_id] + # if confidence > 0.5: + # center_x = int(detection[0] * Width) + # center_y = int(detection[1] * Height) + # w = int(detection[2] * Width) + # h = int(detection[3] * Height) + # x = center_x - w / 2 + # y = center_y - h / 2 + # class_ids.append(class_id) + # confidences.append(float(confidence)) + # boxes.append([x, y, w, h]) + # + # indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold) + # for i in indices: + # try: + # box = boxes[i] + # except: + # i = i[0] + # box = boxes[i] + # x = box[0] + # y = box[1] + # w = box[2] + # h = box[3] + # draw_prediction(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h), classes, + # 
COLORS) + +def receive_frames(client_socket, frame_queue): + data = b"" + payload_size = struct.calcsize("Q") + + while True: + try: + while len(data) < payload_size: + packet = client_socket.recv(4 * 1024) # 4KB + if not packet: + logging.warning("No more packets received, closing connection.") + return + data += packet + + packed_msg_size = data[:payload_size] + data = data[payload_size:] + msg_size = struct.unpack("Q", packed_msg_size)[0] + while len(data) < msg_size: + data += client_socket.recv(4 * 1024) + + frame_data = data[:msg_size] + data = data[msg_size:] + mddata = pickle.loads(frame_data) + frame_queue.put(mddata) + + len_q_gauge.set(frame_queue.qsize()) + transmission_time = datetime.datetime.now() - datetime.datetime.strptime(mddata['sentfromedgetime'], + "%Y-%m-%d %H:%M:%S") + transmission_time_in_seconds = transmission_time.total_seconds() + md_e2c_transmission_time.set(transmission_time_in_seconds) + logging.info(f"Frame received. Transmission time: {transmission_time_in_seconds:.2f} seconds") + + except Exception as e: + logging.error(f"Error receiving frame: {e}") + return + + +def handle_client(frame_queue): + while True: + try: + mddata = frame_queue.get(timeout=1) # Waits for 1 second + logging.debug(f"Processing frame from queue. Queue size: {frame_queue.qsize()}") + len_q_gauge.set(frame_queue.qsize()) + + starting_processing_time = datetime.datetime.now() + process_frame(mddata) + + processing_duration = (datetime.datetime.now() - starting_processing_time).total_seconds() + processing_time.set(processing_duration) + logging.info(f"Frame processed. Processing time: {processing_duration:.2f} seconds") + + response_time_duration = (datetime.datetime.now() - datetime.datetime.strptime(mddata['capture_time'], + "%Y-%m-%d %H:%M:%S")).total_seconds() + response_time.set(response_time_duration) + logging.info(f"Response time for frame: {response_time_duration:.2f} seconds") + + except queue.Empty: + logging.debug("Frame queue is empty, waiting for new frames...") + except Exception as e: + logging.error(f"Error processing frame: {e}") + + +# Setup server socket +server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +host_name = socket.gethostname() +host_ip = socket.gethostbyname(host_name) +socket_address = (host_ip, 9999) +server_socket.bind(socket_address) +server_socket.listen() +logging.info(f"Server started, listening at {socket_address}") + +flask_thread = threading.Thread(target=run_flask) +flask_thread.daemon = True +flask_thread.start() + +len_q_gauge = meter.create_gauge(name="or_len_q", description="Queue length in object recognizer", unit="f") +processing_time = meter.create_gauge(name="or_processing_time", description="Processing time of one frame", unit="s") +md_e2c_transmission_time = meter.create_gauge(name="md_e2c_transmission_time", + description="Transmission time from edge to cloud", unit="s") +response_time = meter.create_gauge(name="response_time", description="Response time for frames", unit="s") + +client_threads = [] +frame_queue = queue.Queue() + +process_thread = threading.Thread(target=handle_client, args=(frame_queue,)) +process_thread.start() + +while True: + try: + client_socket, addr = server_socket.accept() + logging.info(f"New client connected: {addr}") + threading.Thread(target=receive_frames, args=(client_socket, frame_queue)).start() + except Exception as e: + logging.error(f"Error in server loop: {e}") + +server_socket.close() diff --git a/object_recognizer/tracerprovider.py 
b/services/object_recognizer/src/tracerprovider.py similarity index 81% rename from object_recognizer/tracerprovider.py rename to services/object_recognizer/src/tracerprovider.py index 486c58f14c1d4530adb012489b330f16e1cc9af5..011ef6fd94dcdd8e83d6d0e48c4062dad0b4b570 100644 --- a/object_recognizer/tracerprovider.py +++ b/services/object_recognizer/src/tracerprovider.py @@ -1,3 +1,4 @@ +import os import random from opentelemetry import trace, metrics @@ -10,9 +11,14 @@ from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter file_name = "Object Recognizer " -# Generate a random integer -random_int =""# random.randint(1, 10) -resource=Resource.create({SERVICE_NAME: f"{file_name}{random_int}"}) +# try: +# with open('../index.txt', 'r') as file: +# index_value = int(file.read().strip()) +# except (FileNotFoundError, ValueError) as e: +# print(f"Error reading index.txt: {e}") +# index_value = random.randint(1, 10) +index_value = os.environ.get("INDEX", str(random.randint(1, 10))) +resource=Resource.create({SERVICE_NAME: f"{file_name}{index_value}"}) # Set up the tracer provider for tracing trace.set_tracer_provider( TracerProvider( diff --git a/services/object_recognizer/src/webserver.py b/services/object_recognizer/src/webserver.py new file mode 100644 index 0000000000000000000000000000000000000000..37a550c9982c6a5e5bd02adc34f5f2a44635985f --- /dev/null +++ b/services/object_recognizer/src/webserver.py @@ -0,0 +1,30 @@ +from flask import Flask, send_file +import threading +import os +import time + +# Flask application setup +app = Flask(__name__) +IMAGE_PATH = "/app/output/res.jpg" + +# Disable browser caching by adding cache headers +@app.after_request +def add_no_cache_headers(response): + response.cache_control.no_store = True + return response + +@app.route("/") +def home(): + return "<h1>Object Recognizer</h1><p>Go to <a href='/image'>/image</a> to see the latest detection.</p>" + +@app.route("/image") +def serve_image(): + if os.path.exists(IMAGE_PATH): + return send_file(IMAGE_PATH, mimetype='image/jpeg') + else: + return "<h1>No image available</h1>", 404 + + +# Flask server runner +def run_flask(): + app.run(host="0.0.0.0", port=5000, debug=False, threaded=True) \ No newline at end of file diff --git a/usecase.png b/usecase.png deleted file mode 100644 index 782522569076a5187571bdf70815ecd6c59c6772..0000000000000000000000000000000000000000 Binary files a/usecase.png and /dev/null differ
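
For reference, the camera → motion detector → object recognizer sockets all use the wire format that receive_frames() unpacks above: an 8-byte struct-packed ("Q") length prefix followed by a pickled dict carrying the frame and a "%Y-%m-%d %H:%M:%S" capture timestamp. Below is a minimal sender sketch of that framing; send_frame, the localhost target, and the dummy frame are illustrative and not part of the services.

import datetime
import pickle
import socket
import struct

import numpy as np

def send_frame(sock, frame):
    # Pack the payload the way receive_frames() expects: an 8-byte length prefix
    # ("Q") followed by a pickled dict with the frame and its capture time.
    payload = pickle.dumps({
        "frame": frame,
        "capture_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    })
    sock.sendall(struct.pack("Q", len(payload)) + payload)

if __name__ == "__main__":
    # Assumed target: a motion_detector instance listening on port 9998.
    with socket.create_connection(("localhost", 9998)) as s:
        send_frame(s, np.zeros((480, 640, 3), dtype=np.uint8))  # dummy black frame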
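
Per-instance settings now come from environment variables rather than command-line flags or /app/index.txt. A short sketch of how a motion_detector instance resolves them when run outside a container; the variable names and fallback values mirror the os.getenv calls in motion_detection.py and tracerprovider.py, and the print line is illustrative.

import os
import random

or_host = os.getenv("OR_HOST", "localhost")                    # object recognizer address
or_port = int(os.getenv("OR_PORT", "9999"))                    # object recognizer port
index = os.environ.get("INDEX", str(random.randint(1, 10)))    # instance id used in the OTel service name

print(f"Motion Detector {index} -> forwarding detections to {or_host}:{or_port}")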
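
The new webserver.py serves the most recent detection written to /app/output/res.jpg on port 5000 (routes / and /image). A quick way to check the endpoint once the object recognizer container is running, assuming port 5000 is published on localhost as exposed in the Dockerfile; the output filename is illustrative.

import urllib.request

URL = "http://localhost:5000/image"  # assumes port 5000 is published on the host

try:
    with urllib.request.urlopen(URL, timeout=5) as resp:
        with open("latest_detection.jpg", "wb") as out:
            out.write(resp.read())
    print("Saved latest detection image")
except Exception as exc:
    # A 404 here just means no detection has been written to output/res.jpg yet.
    print(f"No image available yet: {exc}")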