Commit 2a8380bc authored by Juan Diego Gonzales's avatar Juan Diego Gonzales
Browse files

Update to 2019 R2

parent a1347dcc
FROM local/dockervino:demo as build
FROM local/dockervino:demo_R2 as build
ARG downloader=/opt/intel/openvino/deployment_tools/tools/model_downloader/downloader.py
ARG optimizer=/opt/intel/openvino/deployment_tools/model_optimizer/mo.py
......
# @(#) run with coherent naming
docker build --tag=local/dockervino:benchmark . #| tee openvinobuild.log
docker build --tag=local/dockervino:benchmark_R2 . #| tee openvinobuild.log
myimage=local/dockervino:benchmark
myimage=local/dockervino:benchmark_R2
myname=benchmark
###### Settings ########
......
# Debug Docker image after installation
# Image to debug on demo
FROM local/dockervino:demo
FROM local/dockervino:demo_R2
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
RUN apt install -y x11-utils vim strace wget libtool autoconf unzip
......@@ -13,5 +13,5 @@ RUN wget https://github.com/libusb/libusb/archive/v1.0.22.zip && \
make -j4 && make install && \
rm -rf /tmp/*
WORKDIR /root/inference_engine_samples_build/intel64/Release
WORKDIR /root/omz_demos_build/intel64/Release
CMD ["/bin/bash"]
# @(#) run with coherent naming
docker build --tag=local/dockervino:debug . #| tee openvinobuild.log
docker build --tag=local/dockervino:debug_R2 . #| tee openvinobuild.log
#!/bin/bash
myimage=local/dockervino:debug
myimage=local/dockervino:debug_R2
myname=debug
#################################
......@@ -21,7 +21,7 @@ rm -f ./xauthority ; cp $XAUTHORITY ./xauthority ;chmod 666 ./xauthority #else r
#################################
# Init Openvino environment
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/inference_engine_samples_build/intel64/Release
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
......@@ -33,7 +33,7 @@ input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
####### CPU Pose Estimation ########
device=CPU
app=${examples_dir}/human_pose_estimation_demo
model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/human-pose-estimation-0001.xml
model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/FP32/human-pose-estimation-0001.xml
###### NCS2 Pose Estimation ######
#device=MYRIAD
......@@ -45,7 +45,8 @@ set -x
docker run \
-d \
-ti \
--mount type=bind,source="$(pwd)"/openvino-samples,target=/opt/intel/openvino/inference_engine/samples \
--mount type=bind,source="$(pwd)"/samples,target=/opt/intel/openvino/inference_engine/samples \
--mount type=bind,source="$(pwd)"/demos,target=/opt/intel/openvino/inference_engine/demos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
......
FROM local/dockervino:phase2 as phase2
FROM local/dockervino:phase3 as phase3
FROM local/dockervino:phase2_R2 as phase2
FROM local/dockervino:phase3_R2 as phase3
FROM scratch
COPY --from=phase2 / /
COPY --from=phase3 /root/openvino_models/ir /root/openvino_models/ir
......
# @(#) run with coherent naming
docker build --tag=local/dockervino:demo . #| tee openvinobuild.log
docker build --tag=local/dockervino:demo_R2 . #| tee openvinobuild.log
......@@ -10,7 +10,7 @@ echo "Application used: $app"
echo "Model used:$model"
if [ "$app" = "/root/inference_engine_samples_build/intel64/Release/interactive_face_detection_demo" ]; then
if [ "$app" = "/root/omz_demos_build/intel64/Release/interactive_face_detection_demo" ]; then
$app -i $input \
-d $device -m $model \
-d_ag $device -m_ag $agM \
......
#!/bin/bash
myimage=local/dockervino:demo
myname=dockervino
myimage=local/dockervino:demo_R2
myname=dockervinodemo
#################################
##### Display Parameters #####
......@@ -21,7 +21,7 @@ rm -f ./xauthority ; cp $XAUTHORITY ./xauthority ;chmod 666 ./xauthority #else r
#################################
# Init Openvino environment
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/inference_engine_samples_build/intel64/Release
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
......@@ -33,49 +33,7 @@ input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
###### CPU Object Detection ########
device=CPU
app=${examples_dir}/object_detection_demo_ssd_async
model=${models_dir}/ssd300/ssd300.xml
####### CPU Pose Estimation ########
#device=CPU
#app=${examples_dir}/human_pose_estimation_demo
#model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/human-pose-estimation-0001.xml
###### CPU Face, Emotion, Age, Gender and Head Pose estimation #########
#device=CPU
#app=${examples_dir}/interactive_face_detection_demo
#model=${models_dir}/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/face-detection-retail-0004.xml
#agM=${models_dir}/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml
#emM=${models_dir}/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml
#hpM=${models_dir}/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml
###### CPU YOLO V3 ########
#device=CPU
#app=${examples_dir}/object_detection_demo_yolov3_async
#model=/root/openvino_models/ir/yolo/yolo_v3.xml
###### NCS2 Object Detection ########
#device=MYRIAD
#app=${examples_dir}/object_detection_demo_ssd_async
#model=${models_dir}/ssd300/ssd300-fp16.xml
###### NCS2 Pose Estimation ######
#device=MYRIAD
#app=${examples_dir}/human_pose_estimation_demo
#model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/human-pose-estimation-0001-fp16.xml
###### NCS2 Face, Emotion, Age, Gender and Head Pose estimation #########
#device=MYRIAD
#app=${examples_dir}/interactive_face_detection_demo
#model=${models_dir}/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/face-detection-retail-0004-fp16.xml
#agM=${models_dir}/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013-fp16.xml
#emM=${models_dir}/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003-fp16.xml
#hpM=${models_dir}/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001-fp16.xml
###### NCS2 YOLO V3 ########
#device=MYRIAD
#app=${examples_dir}/object_detection_demo_yolov3_async
#model=/root/openvino_models/ir/yolo/yolo_v3-fp16.xml
model=${models_dir}/ssd300/FP32/ssd300.xml
# Input and Log options
opts="--entrypoint=/entrypoint.sh"
......
......@@ -4,7 +4,7 @@
FROM ubuntu:xenial
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG openvinoTar=l_openvino_toolkit_p_2019.1.144
ARG openvinoTar=l_openvino_toolkit_p_2019.2.242
ARG INSTALL_DIR=/opt/intel/openvino
ARG TEMP_DIR=/tmp/openvino_installer
RUN mkdir -p $TEMP_DIR
......@@ -41,8 +41,9 @@ RUN bash $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh
# build Inference Engine Samples
RUN $INSTALL_DIR/deployment_tools/inference_engine/samples/build_samples.sh
RUN $INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh
# install model_optimizer requisites
# install model_optimizer prerequisites; needs tf 1.15 for kontron
RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
#RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements_tf.txt
RUN $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
......
# @(#) run with coherent naming
docker build --tag=local/dockervino:phase1 . #| tee openvinobuild.log
docker build --tag=local/dockervino:phase1_R2 . #| tee openvinobuild.log
myimage=local/dockervino:phase1
myimage=local/dockervino:phase1_R2
myname=dockervinoph1
set -x
......
# Compilation of examples for openVino
# Compilation demo_squeezenet_download_convert_run and security_barrier_camera
FROM local/dockervino:phase1 as build
FROM local/dockervino:phase1_R2 as build
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG INSTALL_DIR=/opt/intel/openvino
WORKDIR $INSTALL_DIR/deployment_tools/demo
......
# @(#) run with coherent naming
docker build --tag=local/dockervino:phase2 . #| tee openvinobuild.log
docker build --tag=local/dockervino:phase2_R2 . #| tee openvinobuild.log
myimage=local/dockervino:phase2
#!/bin/bash
myimage=local/dockervino:phase2_R2
myname=dockervinoph2
set -x
......
# Docker image after installation
# Downloads model ssd300 and creates yolov3
FROM local/dockervino:phase2
FROM local/dockervino:phase2_R2
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG INSTALL_DIR=/opt/intel/openvino
ARG downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
......@@ -13,32 +13,27 @@ RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py \
--input_model $models/object_detection/common/ssd/300/caffe/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/object_detection/common/ssd/300/caffe/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300 --model_name ssd300 --data_type FP32
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py \
--input_model $models/object_detection/common/ssd/300/caffe/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/object_detection/common/ssd/300/caffe/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300 --model_name ssd300-fp16 --data_type FP16
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
RUN python3 $downloader --name human-pose-estimation-0001-fp16 --output_dir $models/ir
# Downloading Face Detection
RUN python3 $downloader --name face-detection-retail-0004 --output_dir $models/ir
RUN python3 $downloader --name face-detection-retail-0004-fp16 --output_dir $models/ir
# Downloading Age gender Recognition
RUN python3 $downloader --name age-gender-recognition-retail-0013 --output_dir $models/ir
RUN python3 $downloader --name age-gender-recognition-retail-0013-fp16 --output_dir $models/ir
# Downloading Emotion Recognition
RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $models/ir
RUN python3 $downloader --name emotions-recognition-retail-0003-fp16 --output_dir $models/ir
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
RUN python3 $downloader --name head-pose-estimation-adas-0001-fp16 --output_dir $models/ir
WORKDIR $models
# Clone Yolo V3 tensorflow
......@@ -60,15 +55,15 @@ RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo \
--model_name yolo_v3-fp16 \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3 \
--data_type FP16
# Optimizer on Yolov3Tiny
......@@ -77,15 +72,35 @@ RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3_tiny \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo \
--model_name yolo_v3_tiny-fp16 \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3_tiny \
--data_type FP16
WORKDIR $models
# Download smallest maskrcnn
RUN wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
RUN tar -xzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
# Optimizer on maskrcnn
# Convert Mask R-CNN frozen graph to FP32 IR.
# NOTE: the Model Optimizer option is --tensorflow_object_detection_api_pipeline_config
# (the shortened --tensorflow_object_detection_api_pipeline is not a valid flag and
# would abort the build with an "unrecognized arguments" error).
RUN python3 $optimizer/mo_tf.py \
	--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
	--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
	--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
	--output_dir $models/ir/mask_rcnn/FP32 \
	--model_name mask_rcnn_inception_v2 \
	--data_type FP32 --reverse_input_channels
# Convert Mask R-CNN frozen graph to FP16 IR (for MYRIAD/NCS2 targets).
# NOTE: corrected flag name — Model Optimizer expects
# --tensorflow_object_detection_api_pipeline_config, not the truncated
# --tensorflow_object_detection_api_pipeline used before.
RUN python3 $optimizer/mo_tf.py \
	--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
	--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
	--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
	--output_dir $models/ir/mask_rcnn/FP16 \
	--model_name mask_rcnn_inception_v2 \
	--data_type FP16 --reverse_input_channels
CMD ["/bin/bash"]
# @(#) run with coherent naming
docker build --tag=local/dockervino:phase3 . #| tee openvinobuild.log
docker build --tag=local/dockervino:phase3_R2 . #| tee openvinobuild.log
myimage=local/dockervino:phase3
#!/bin/bash
myimage=local/dockervino:phase3_R2
myname=dockervinoph3
set -x
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment