Commit 58ed7756 authored by Juan Diego's avatar Juan Diego
Browse files

Update detection with new openvino release

parent 12859742
......@@ -14,6 +14,8 @@ if [ "$app" = "/root/omz_demos_build/intel64/Release/interactive_face_detection_
$app -i $input -d $device -m $model -d_ag $device -m_ag $agM -d_em $device -m_em $emM -d_hp $device -m_hp $hpM
elif [ "$app" = "/root/omz_demos_build/intel64/Release/human_pose_estimation_demo" ]; then
$app -i $input -at openpose -d $device -m $model -send $send -broker $broker -client $client
elif [ "$app" = "/root/omz_demos_build/intel64/Release/object_detection_demo" ]; then
$app -i $input -at $detection -d $device -m $model
elif [ "$app" = "/root/omz_demos_build/intel64/Release/pedestrian_tracker_demo" ]; then
$app -i $input -d_det $device -d_reid $device -m_det $m_det -m_reid $m_reid -send $send -broker $broker -client $client
elif [ "$app" = "/opt/intel/openvino/deployment_tools/open_model_zoo/demos/human_pose_estimation_3d_demo/python/human_pose_estimation_3d_demo.py" ]; then
......
......@@ -27,16 +27,16 @@ models_dir=/root/openvino_models/ir
input=/videos/test.mp4
# device: CPU or MYRIAD
device=CPU
detection=ssd
app=${examples_dir}/object_detection_demo
####### CPU Object Detection ###
if [ "$device" == CPU ]; then
app=${examples_dir}/object_detection_demo_ssd_async
model=${models_dir}/ssd300/FP32/ssd300.xml
model=${models_dir}/public/ssd_mobilenet_v1_coco/FP32/ssd_mobilenet_v1_coco.xml
fi
###### NCS2 Object Detection ####
if [ "$device" == MYRIAD ]; then
app=${examples_dir}/object_detection_demo_ssd_async
model=${models_dir}/ssd300/FP16/ssd300.xml
model=${models_dir}/public/ssd_mobilenet_v1_coco/FP16/ssd_mobilenet_v1_coco.xml
fi
opts="--entrypoint=/entrypoint.sh"
......@@ -47,13 +47,13 @@ docker run $opts \
-d \
-ti \
--mount type=bind,source="$(pwd)"/videos,target=/videos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
--env="DISPLAY=$display" \
--env="input=$input" \
--env="device=$device" \
--env="app=$app" \
--env="detection=$detection" \
--env="model=$model" \
--env="m_det=$m_det" \
--env="m_reid=$m_reid" \
......@@ -63,8 +63,7 @@ docker run $opts \
--env="agM=$agM" \
--env="emM=$emM" \
--env="hpM=$hpM" \
--device="/dev/video0:/dev/video0" \
--volume="/dev:/dev" \
--device="/dev/dri:/dev/dri" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix" \
--env="PS1=$myname> "\
--publish-all=true \
......
......@@ -27,16 +27,16 @@ models_dir=/root/openvino_models/ir
input=/videos/test.mp4
# device: CPU or MYRIAD
device=CPU
detection=yolo
app=${examples_dir}/object_detection_demo
###### CPU YOLO V3 ########
####### CPU Object Detection ###
if [ "$device" == CPU ]; then
app=${examples_dir}/object_detection_demo_yolov3_async
model=${models_dir}/yolo/FP32/yolo_v3.xml
model=${models_dir}/public/yolo-v3-tiny-tf/FP32/yolo-v3-tiny-tf.xml
fi
###### NCS2 YOLO V3 ########
###### NCS2 Object Detection ####
if [ "$device" == MYRIAD ]; then
app=${examples_dir}/object_detection_demo_yolov3_async
model=${models_dir}/yolo/FP16/yolo_v3.xml
model=${models_dir}/public/yolo-v3-tiny-tf/FP16/yolo-v3-tiny-tf.xml
fi
opts="--entrypoint=/entrypoint.sh"
......@@ -47,13 +47,13 @@ docker run $opts \
-d \
-ti \
--mount type=bind,source="$(pwd)"/videos,target=/videos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
--env="DISPLAY=$display" \
--env="input=$input" \
--env="device=$device" \
--env="app=$app" \
--env="detection=$detection" \
--env="model=$model" \
--env="m_det=$m_det" \
--env="m_reid=$m_reid" \
......@@ -63,8 +63,7 @@ docker run $opts \
--env="agM=$agM" \
--env="emM=$emM" \
--env="hpM=$hpM" \
--device="/dev/video0:/dev/video0" \
--volume="/dev:/dev" \
--device="/dev/dri:/dev/dri" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix" \
--env="PS1=$myname> "\
--publish-all=true \
......
......@@ -31,65 +31,21 @@ RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $mo
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
# Download tracker networks
# Downloading tracker networks
RUN $downloader --name person-detection-retail-0013 --output_dir $models/ir
RUN $downloader --name person-reidentification-retail-0277 --output_dir $models/ir
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Downloading Human Pose Detection 3D
RUN python3 $downloader --list $tools/open_model_zoo/demos/human_pose_estimation_3d_demo/python/models.lst
RUN python3 $converter --list $tools/open_model_zoo/demos/human_pose_estimation_3d_demo/python/models.lst --o $models/ir --mo $optimizer
RUN python3 $downloader --list $tools/open_model_zoo/demos/human_pose_estimation_3d_demo/python/models.lst
RUN python3 $converter --list $tools/open_model_zoo/demos/human_pose_estimation_3d_demo/python/models.lst --output_dir $models/ir --mo $optimizer
# Download Yolo v3
RUN python3 $downloader --name yolo-v3-tf
#wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolov3.pb && \
#wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolo_v3_new.json
# Optimizer on Yolov3
RUN python3 $optimizer \
--input_model $models/public/yolo-v3-tf/yolo-v3.pb \
--transformations_config $models/public/yolo-v3-tf/yolo-v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer \
--input_model $models/public/yolo-v3-tf/yolo-v3.pb \
--transformations_config $models/public/yolo-v3-tf/yolo-v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3 \
--data_type FP16
# Downloading SSD Detection
RUN $downloader --name ssd_mobilenet_v1_coco --output_dir $models
RUN $converter --name ssd_mobilenet_v1_coco -d $models --output_dir $models/ir
WORKDIR $models
# Download smallest maskrcnn
RUN python3 $downloader --name mask_rcnn_inception_v2_coco
# Optimizer on maskrcnn
RUN python3 $optimizer \
--input_model $models/public/mask_rcnn_inception_v2_coco/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--transformations_config $tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline $models/public/mask_rcnn_inception_v2_coco/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP32 \
--model_name mask_rcnn_inception_v2 \
--data_type FP32 --reverse_input_channels
RUN python3 $optimizer \
--input_model $models/public/mask_rcnn_inception_v2_coco/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--transformations_config $tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline $models/public/mask_rcnn_inception_v2_coco/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP16 \
--model_name mask_rcnn_inception_v2 \
--data_type FP16 --reverse_input_channels
# Downloading Yolo v3 Detection
RUN $downloader --name yolo-v3-tiny-tf --output_dir $models
RUN $converter --name yolo-v3-tiny-tf -d $models --output_dir $models/ir
# Download Instance Segmentation
RUN python3 $downloader --name instance-segmentation-security-0228 --output_dir $models/ir
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment