Commit 50e771fd authored by Juan Diego's avatar Juan Diego
Browse files

Updated to work with openVINO 2019.1.094

-Phase1: update directories, python 3.6 installed
-Phase2: update directories
-Phase3: update downloads and optimizers
-demo: update entrypoint.sh and runcmd.sh
parent 80ef7768
......@@ -5,12 +5,17 @@ source $setupvars
### If face detection then apply MYRIAD DEVICE
# app, device, model and input are defined in the runcmd file
echo "Device used: $device"
echo "Application used: $app"
echo "Model used:$model"
if [ "$app" = "/root/inference_engine_samples/intel64/Release/interactive_face_detection_demo" ]; then
$app -d $device -m $faceM -d_ag $device -m_ag $ageM -d_em $device -m_em $emM -d_hp $device -m_hp $headM
if [ "$app" = "/root/inference_engine_samples_build/intel64/Release/interactive_face_detection_demo" ]; then
$app -i $input \
-d $device -m $model \
-d_ag $device -m_ag $agM \
-d_em $device -m_em $emM \
-d_hp $device -m_hp $hpM
else
echo "Device used $device"
echo "Application used $app"
echo "Model used $model"
$app -d $device -m $model -i $input
$app -i $input -d $device -m $model
fi
......@@ -20,9 +20,9 @@ rm -f ./xauthority ; cp $XAUTHORITY ./xauthority ;chmod 666 ./xauthority #else r
##### General Parameters ######
#################################
# Init Openvino environment
setupvars=/opt/intel/computer_vision_sdk/bin/setupvars.sh
examples_dir=/root/inference_engine_samples/intel64/Release
intel_models_dir=/opt/intel/computer_vision_sdk/deployment_tools/intel_models
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/inference_engine_samples_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
......@@ -31,52 +31,53 @@ input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
#################################
###### CPU Object Detection ########
device=CPU
app=${examples_dir}/object_detection_demo_ssd_async
model=/root/openvino_models/ir/ssd300/FP32/ssd300.xml
#device=CPU
#app=${examples_dir}/object_detection_demo_ssd_async
#model=${models_dir}/ssd300/ssd300.xml
####### CPU Pose Estimation ########
# device=CPU
# app=${examples_dir}/human_pose_estimation_demo
# model=${intel_models_dir}/human-pose-estimation-0001/FP32/human-pose-estimation-0001.xml
#device=CPU
#app=${examples_dir}/human_pose_estimation_demo
#model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/human-pose-estimation-0001.xml
###### CPU Face, Emotion, Age, Gender and Head Pose estimation #########
#device=CPU
#app=${examples_dir}/interactive_face_detection_demo
#faceM=${intel_models_dir}/face-detection-retail-0004/FP32/face-detection-retail-0004.xml
#ageM=${intel_models_dir}/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml
#emM=${intel_models_dir}/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
#headM=${intel_models_dir}/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
#faceM=${models_dir}/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/face-detection-retail-0004.xml
#agM=${models_dir}/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml
#emM=${models_dir}/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml
#hpM=${models_dir}/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml
###### CPU YOLO V3 ########
#device=CPU
#app=${examples_dir}/object_detection_demo_yolov3_async
#model=/root/openvino_models/ir/yolo/v3/tf/FP32/yolo_v3.xml
#model=/root/openvino_models/ir/yolo/yolo_v3.xml
###### NCS2 Object Detection ########
#device=MYRIAD
#app=${examples_dir}/object_detection_demo_ssd_async
#model=/root/openvino_models/ir/ssd300/FP16/ssd300.xml
#model=${models_dir}/ssd300/ssd300-fp16.xml
###### NCS2 Pose Estimation ######
#device=MYRIAD
#app=${examples_dir}/human_pose_estimation_demo
#model=${intel_models_dir}/human-pose-estimation-0001/FP16/human-pose-estimation-0001.xml
device=MYRIAD
app=${examples_dir}/human_pose_estimation_demo
model=${models_dir}/Transportation/human_pose_estimation/mobilenet-v1/dldt/human-pose-estimation-0001-fp16.xml
###### NCS2 Face, Emotion, Age, Gender and Head Pose estimation #########
#device=MYRIAD
#app=${examples_dir}/interactive_face_detection_demo
#faceM=${intel_models_dir}/face-detection-retail-0004/FP16/face-detection-retail-0004.xml
#ageM=${intel_models_dir}/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013.xml
#emM=${intel_models_dir}/emotions-recognition-retail-0003/FP16/emotions-recognition-retail-0003.xml
#headM=${intel_models_dir}/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml
#model=${models_dir}/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/face-detection-retail-0004-fp16.xml
#agM=${models_dir}/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013-fp16.xml
#emM=${models_dir}/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003-fp16.xml
#hpM=${models_dir}/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001-fp16.xml
###### NCS2 YOLO V3 ########
#device=MYRIAD
#app=${examples_dir}/object_detection_demo_yolov3_async
#model=/root/openvino_models/ir/yolo/v3/tf/FP16/yolo_v3.xml
#model=/root/openvino_models/ir/yolo/yolo_v3-fp16.xml
# Input and Log options
opts="--entrypoint=/entrypoint.sh"
#opts="-ti --cap-add SYS_PTRACE"
......@@ -93,10 +94,9 @@ docker run $opts \
--env="device=$device" \
--env="app=$app" \
--env="model=$model" \
--env="faceM=$faceM" \
--env="ageM=$ageM" \
--env="agM=$agM" \
--env="emM=$emM" \
--env="headM=$headM" \
--env="hpM=$hpM" \
--device="/dev/video0:/dev/video0" \
--volume="/dev:/dev" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix" \
......
......@@ -4,31 +4,47 @@
FROM ubuntu:xenial
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG openvinoTar=l_openvino_toolkit_p_2018.5.455
ARG openvinoTar=l_openvino_toolkit_p_2019.1.094
ARG INSTALL_DIR=/opt/intel/openvino
ARG TEMP_DIR=/tmp/openvino_installer
RUN mkdir -p $TEMP_DIR
WORKDIR $TEMP_DIR
COPY ./$openvinoTar.tgz $TEMP_DIR
RUN apt-get update && apt-get -y upgrade && apt-get autoremove
RUN mkdir -p /home/openvino
WORKDIR /home/openvino
RUN apt-get update
RUN apt-get install -y python3.5
COPY ./$openvinoTar.tgz /home/openvino
RUN tar -zxf $openvinoTar.tgz
WORKDIR $openvinoTar
RUN sed -i 's/sudo -E//g' ./install_cv_sdk_dependencies.sh
RUN sed -i 's/apt install -y/apt install -y cpio/g' ./install_cv_sdk_dependencies.sh
RUN ./install_cv_sdk_dependencies.sh
RUN sed -i 's/ACCEPT_EULA=decline/ACCEPT_EULA=accept/g' ./silent.cfg
RUN sed -i 's/#INTEL_SW_IMPROVEMENT_PROGRAM_CONSENT=no/INTEL_SW_IMPROVEMENT_PROGRAM_CONSENT=no/g' ./silent.cfg
RUN ./install.sh --silent silent.cfg
RUN sed -i 's/$(lsb_release -r -s)/"16.04"/g' /opt/intel/computer_vision_sdk/bin/setupvars.sh
RUN /opt/intel/computer_vision_sdk/bin/setupvars.sh
RUN echo "source /opt/intel/computer_vision_sdk/bin/setupvars.sh" >> /root/.bashrc
RUN sed -i 's/sudo -E//g' /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
RUN sed -i 's/>=1.2.0/==1.5.0/g' /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/requirements.txt
RUN sed -i 's/>=1.2.0/==1.5.0/g' /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/requirements_tf.txt
RUN /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
WORKDIR /home/openvino
RUN rm ./$openvinoTar.tgz
RUN rm -r $openvinoTar
# installing needed dependencies
RUN apt-get install -y --no-install-recommends \
build-essential \
cpio \
curl \
lsb-release \
pciutils \
software-properties-common \
sudo && \
sudo add-apt-repository ppa:jonathonf/python-3.6 && \
apt-get update && apt-get install -y python3.6 python3.6-dev && \
curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.6 && \
rm /usr/bin/python3 && \
sudo ln -s python3.6 /usr/bin/python3 && \
rm -rf /var/lib/apt/lists/*
# installing openVINO itself
RUN tar xf $openvinoTar.tgz && cd $openvinoTar && \
sed -i 's/decline/accept/g' silent.cfg && \
./install.sh -s silent.cfg && \
rm -rf $TEMP_DIR
ENTRYPOINT /bin/sh
CMD /bin/sh
# installing openVINO dependencies
RUN bash $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh
# build Inference Engine Samples
RUN $INSTALL_DIR/deployment_tools/inference_engine/samples/build_samples.sh
# install model_optimizer requisites
RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
#RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements_tf.txt
RUN $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
# init openvino env
RUN echo "source $INSTALL_DIR/bin/setupvars.sh" >> /root/.bashrc
CMD ["/bin/bash"]
myimage=local/dockervino:phase1
myname=dockervino
myname=dockervinoph1
set -x
docker run \
......
......@@ -3,16 +3,10 @@
FROM local/dockervino:phase1 as build
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
WORKDIR /opt/intel/computer_vision_sdk/deployment_tools/demo
RUN sed -i 's/sudo -E//g' /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN sed -i 's/source "/"/g' /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN sed -i 's:"install_prerequisites.sh":"./install_prerequisites.sh":g' /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN sed -i 's:make -j8 classification_sample:make -j8:g' /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN sed -i 's/sudo -E//g' /opt/intel/computer_vision_sdk/deployment_tools/demo/demo_security_barrier_camera.sh
RUN rm -r /root/openvino_models/models
FROM scratch
COPY --from=build / /
ENTRYPOINT /bin/sh
CMD /bin/sh
ARG INSTALL_DIR=/opt/intel/openvino
WORKDIR $INSTALL_DIR/deployment_tools/demo
#RUN sed -i 's/source "/"/g' $INSTALL_DIR/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
#RUN sed -i 's:"install_prerequisites.sh":"./install_prerequisites.sh":g' /opt/intel/openvino/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
#RUN sed -i 's:make -j8 classification_sample:make -j8:g' /opt/intel/openvino/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN $INSTALL_DIR/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
CMD ["/bin/bash"]
myimage=local/dockervino:phase2
myname=dockervino
myname=dockervinoph2
set -x
docker run \
......
......@@ -3,41 +3,69 @@
FROM local/dockervino:phase2
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG downloader=/opt/intel/computer_vision_sdk/deployment_tools/model_downloader/downloader.py
ARG optimizer=/opt/intel/computer_vision_sdk/deployment_tools/model_optimizer
ARG INSTALL_DIR=/opt/intel/openvino
ARG downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ARG optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ARG models=/root/openvino_models
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py --input_model $models/object_detection/common/ssd/300/caffe/ssd300.caffemodel --output_dir $models/ir/ssd300/FP32 --data_type FP32
RUN python3 $optimizer/mo.py --input_model $models/object_detection/common/ssd/300/caffe/ssd300.caffemodel --output_dir $models/ir/ssd300/FP16 --data_type FP16
RUN python3 $optimizer/mo.py --input_model $models/object_detection/common/ssd/300/caffe/ssd300.caffemodel --output_dir $models/ir/ssd300 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py --input_model $models/object_detection/common/ssd/300/caffe/ssd300.caffemodel --output_dir $models/ir/ssd300 --model_name ssd300-fp16 --data_type FP16
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
RUN python3 $downloader --name human-pose-estimation-0001-fp16 --output_dir $models/ir
# Downloading Face Detection
RUN python3 $downloader --name face-detection-retail-0004 --output_dir $models/ir
RUN python3 $downloader --name face-detection-retail-0004-fp16 --output_dir $models/ir
# Downloading Age gender Recognition
RUN python3 $downloader --name age-gender-recognition-retail-0013 --output_dir $models/ir
RUN python3 $downloader --name age-gender-recognition-retail-0013-fp16 --output_dir $models/ir
# Downloading Emotion Recognition
RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $models/ir
RUN python3 $downloader --name emotions-recognition-retail-0003-fp16 --output_dir $models/ir
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
RUN python3 $downloader --name head-pose-estimation-adas-0001-fp16 --output_dir $models/ir
# Copying YoloV3 and Yolov3 tiny
COPY ./yolo_v3.pb $models/object_detection/common/yolo/v3/tf/yolo_v3.pb
COPY ./yolo_v3.json $optimizer/extensions/front/tf/yolo_v3.json
RUN python3 $optimizer/mo_tf.py \
--input_model $models/object_detection/common/yolo/v3/tf/yolo_v3.pb \
--tensorflow_use_custom_operations_config $optimizer/extensions/front/tf/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/v3/tf/FP32 \
--output_dir $models/ir/yolo \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/object_detection/common/yolo/v3/tf/yolo_v3.pb \
--tensorflow_use_custom_operations_config $optimizer/extensions/front/tf/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/v3/tf/FP16 \
--output_dir $models/ir/yolo \
--model_name yolo_v3-fp16 \
--data_type FP16
COPY ./yolo_v3_tiny.pb $models/object_detection/common/yolo/v3/tf/yolo_v3_tiny.pb
COPY ./yolo_v3_tiny.json $optimizer/extensions/front/tf/yolo_v3_tiny.json
RUN python3 $optimizer/mo_tf.py \
--input_model $models/object_detection/common/yolo/v3/tf/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $optimizer/extensions/front/tf/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo-tiny/v3/tf/FP32 \
--output_dir $models/ir/yolo \
--model_name yolo_v3_tiny \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/object_detection/common/yolo/v3/tf/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $optimizer/extensions/front/tf/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo-tiny/v3/tf/FP16 \
--output_dir $models/ir/yolo \
--model_name yolo_v3_tiny-fp16 \
--data_type FP16
ENTRYPOINT /bin/sh
CMD /bin/sh
myimage=local/dockervino:phase3
myname=dockervino
myname=dockervinoph3
set -x
docker run \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment