Commit 19313051 authored by Ukhu

Update with OpenVINO 2020.1.023

parent 91962e9f
*.tgz
*.mp4
*.pb
.idea*
@@ -5,9 +5,8 @@ FROM local/dockervino:phase1 as phase1
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
# Copy models from phase2
COPY --from=phase2 /root/openvino_models/ir /root/openvino_models/ir
ARG INSTALL_DIR=/opt/intel/openvino
WORKDIR $INSTALL_DIR/deployment_tools
RUN apt install -y x11-utils vim strace wget libtool autoconf unzip libmosquittopp-dev
RUN apt install -y x11-utils vim strace libtool autoconf libmosquittopp-dev mosquitto mosquitto-clients
WORKDIR /tmp
RUN wget https://github.com/libusb/libusb/archive/v1.0.22.zip && \
unzip v1.0.22.zip && cd libusb-1.0.22 && \
......
@@ -25,7 +25,7 @@ examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
build_cmd=/opt/intel/openvino/deployment_tools/open_model_zoo/demos/build_demos.sh
# Input: IPCam or USBCam
input=https://motchallenge.net/movies/ETH-Bahnhof.mp4
input=/videos/test.mp4
#################################
######## Examples ###########
#################################
@@ -51,7 +51,7 @@ docker run $opts \
-d \
-ti \
--mount type=bind,source="$(pwd)"/../../open_model_zoo,target=/opt/intel/openvino/deployment_tools/open_model_zoo \
--mount type=bind,source="$(pwd)"/videos,target=/videos \
--mount type=bind,source="$(pwd)"/../demo/videos,target=/videos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
......
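For reference, the input variable above is what the demo binary receives; the script's comment says an IPCam or USBCam also works. A hedged sketch of the alternatives (only the file path is in the commit; the camera lines are assumptions, since the exact syntax depends on the demo's OpenCV backend):

# file on the bind-mounted volume (as committed)
input=/videos/test.mp4
# assumption: USB camera passed through via --device=/dev/video0
#input=/dev/video0
# assumption: RTSP stream from an IP camera
#input=rtsp://<camera-ip>:554/stream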
FROM local/dockervino:phase2 as phase2
FROM local/dockervino:phase1 as phase1
ARG INSTALL_DIR=/opt/intel/openvino
WORKDIR $INSTALL_DIR/deployment_tools
RUN apt install -y libmosquittopp-dev && \
rm -rf open_model_zoo && \
......
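Assuming the usual layout (this Dockerfile in its own directory, tag taken from the run script below, which sets myimage=local/dockervino:demo), the image would be built along these lines; treat it as a sketch, not a committed command:

docker build -t local/dockervino:demo .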
#!/bin/bash
myimage=local/dockervino:demo
myname=dockervinodemo
#################################
##### Display Parameters #####
#################################
# if run from a graphics session, set some X server params; otherwise just echo what would be needed
[ "k$DISPLAY" = "k" ] && doit=echo
# X11 display and access file (to use via the XAUTHORITY env var)
# enable access from anywhere (used for containers)
$doit xhost +
# disable screen saver
$doit xset s 0
# screen to use (0 is usually the system's physical display; others would be Xvnc virtual screens)
display=$DISPLAY
rm -f ./xauthority; cp "$XAUTHORITY" ./xauthority; chmod 666 ./xauthority # else the root user inside the container cannot open it
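As a sanity check (not part of the commit), one can verify from inside a started container that the display forwarding works; xdpyinfo ships in x11-utils, which the demo Dockerfile installs:

# run inside the container; DISPLAY is passed in via --env
ls /tmp/.X11-unix                          # X socket bind-mounted from the host
xdpyinfo -display "$DISPLAY" | head -n 3   # should print server info, not an access error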
#################################
##### General Parameters ######
#################################
# Init Openvino environment
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=/videos/test.mp4
#################################
######## Examples ###########
#################################
####### CPU Pose Estimation ########
device=CPU
app=${examples_dir}/human_pose_estimation_demo
model=${models_dir}/intel/human-pose-estimation-0001/FP32/human-pose-estimation-0001.xml
raw=true
broker=kontron-lora.cloudapp.net
client=Demo
###### NCS2 Pose Estimation ######
#device=MYRIAD
#app=${examples_dir}/human_pose_estimation_demo
#model=${models_dir}/intel/human-pose-estimation-0001/FP16/human-pose-estimation-0001.xml
opts="--entrypoint=/entrypoint.sh"
# Running the container
set -x
docker run $opts \
-d \
-ti \
--mount type=bind,source="$(pwd)"/videos,target=/videos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
--env="DISPLAY=$display" \
--env="input=$input" \
--env="device=$device" \
--env="app=$app" \
--env="model=$model" \
--env="m_det=$m_det" \
--env="m_reid=$m_reid" \
--env="broker=$broker" \
--env="client=$client" \
--env="raw=$raw" \
--env="agM=$agM" \
--env="emM=$emM" \
--env="hpM=$hpM" \
--device="/dev/video0:/dev/video0" \
--volume="/dev:/dev" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix" \
--env="PS1=$myname> "\
--publish-all=true \
--hostname=$myname \
--entrypoint /entrypoint.sh \
--name $myname \
--cidfile=/tmp/monitcont.id \
$myimage /bin/sh
contid=$(cat /tmp/monitcont.id; rm -f /tmp/monitcont.id)
set +x
$debug docker logs $contid
$debug docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $contid
set +x
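Once the container is up, the demo is normally started by /entrypoint.sh; a hedged manual equivalent using the same variables the script exports (the -i/-m/-d flags match the Open Model Zoo demos, but treat the exact invocation as a sketch):

docker exec -ti dockervinodemo /bin/bash -c \
    "source $setupvars && $app -i $input -m $model -d $device"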
@@ -24,20 +24,12 @@ setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=https://motchallenge.net/movies/ETH-Bahnhof.mp4
input=/videos/test.mp4
#################################
######## Examples ###########
#################################
####### CPU Pose Estimation ########
#device=CPU
#app=${examples_dir}/human_pose_estimation_demo
#model=${models_dir}/intel/human-pose-estimation-0001/FP32/human-pose-estimation-0001.xml
#raw=true
#broker=kontron-lora.cloudapp.net
#client=Demo
####### Pedestrian Tracker ########
device=CPU
app=${examples_dir}/pedestrian_tracker_demo
@@ -59,6 +51,7 @@ set -x
docker run $opts \
-d \
-ti \
--mount type=bind,source="$(pwd)"/videos,target=/videos \
--privileged \
--net=host \
--env="setupvars=$setupvars" \
......
@@ -2,49 +2,52 @@
# Replace openvinoTar with the OpenVINO version
# The openvinoTar tarball is deleted after installation
FROM ubuntu:xenial
FROM ubuntu:18.04
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG openvinoTar=l_openvino_toolkit_p_2019.3.376
ARG INSTALL_DIR=/opt/intel/openvino
# Dependencies
ARG DEPENDENCIES="autoconf \
automake \
build-essential \
cmake \
cpio \
curl \
gnupg2 \
libdrm2 \
libglib2.0-0 \
lsb-release \
libgtk-3-0 \
libtool \
python3-pip \
udev \
unzip \
sudo \
wget \
git"
RUN apt-get update && apt-get -y upgrade && apt-get autoremove && \
apt-get install -y --no-install-recommends ${DEPENDENCIES} && \
rm -rf /var/lib/apt/lists/*
# Openvino Version
ARG openvinoTar=l_openvino_toolkit_p_2020.1.023
ENV INSTALL_DIR=/opt/intel/openvino
ARG TEMP_DIR=/tmp/openvino_installer
RUN mkdir -p $TEMP_DIR
WORKDIR $TEMP_DIR
COPY ./$openvinoTar.tgz $TEMP_DIR
RUN apt-get update && apt-get -y upgrade && apt-get autoremove
# installing needed dependencies
RUN apt-get install -y --no-install-recommends \
build-essential \
cpio \
wget \
git \
curl \
lsb-release \
pciutils \
software-properties-common \
sudo && \
sudo add-apt-repository ppa:jonathonf/python-3.6 && \
apt-get update && apt-get install -y python3.6 python3.6-dev && \
curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.6 && \
rm /usr/bin/python3 && \
sudo ln -s python3.6 /usr/bin/python3 && \
pip3 install pillow && \
rm -rf /var/lib/apt/lists/*
# installing openVINO itself
RUN tar xf $openvinoTar.tgz && cd $openvinoTar && \
sed -i 's/decline/accept/g' silent.cfg && \
./install.sh -s silent.cfg && \
rm -rf $TEMP_DIR
# installing openVINO dependencies
RUN bash $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh
rm -rf $TEMP_DIR && \
$INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh
# install model_optimizer requisites needs tf 1.15 for kontron
RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
# Install model_optimizer prerequisites; tf 1.5 is needed for ApolloLake, plus setuptools
RUN sed -i 's/<2.0.0/<=1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
RUN pip3 install setuptools
RUN $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
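The sed two steps up pins the TensorFlow requirement before the prerequisites are installed; its effect on the model_optimizer requirements.txt is roughly this (the exact original line is an assumption inferred from the pattern being replaced):

# before: tensorflow>=1.2.0,<2.0.0
# after:  tensorflow>=1.2.0,<=1.5.0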
# init openvino env
# Init Openvino variables
RUN echo "source $INSTALL_DIR/bin/setupvars.sh" >> /root/.bashrc
# check installation with example
# Check installation with benchmark demo
RUN $INSTALL_DIR/deployment_tools/demo/demo_benchmark_app.sh
CMD ["/bin/bash"]
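With the tarball sitting next to the Dockerfile (the COPY above expects it), phase1 would be built along these lines; the build-arg simply overrides the default already set in the file:

docker build -t local/dockervino:phase1 \
    --build-arg openvinoTar=l_openvino_toolkit_p_2020.1.023 .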
@@ -3,22 +3,9 @@
FROM local/dockervino:phase1
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG INSTALL_DIR=/opt/intel/openvino
ARG downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ARG optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ARG models=/root/openvino_models
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
ENV downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ENV optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ENV models=/root/openvino_models
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
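The resulting layout is the one the run scripts reference; both paths below are copied from the demo script earlier in this commit (FP32 from the CPU example, FP16 from the MYRIAD one), not newly invented:

# /root/openvino_models/ir/intel/human-pose-estimation-0001/FP32/human-pose-estimation-0001.xml
# /root/openvino_models/ir/intel/human-pose-estimation-0001/FP16/human-pose-estimation-0001.xml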
@@ -35,54 +22,44 @@ RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $mo
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
WORKDIR $models
# Clone Yolo V3 tensorflow
RUN git clone https://github.com/mystic123/tensorflow-yolo-v3
WORKDIR $models/tensorflow-yolo-v3
# Download weights of Yolov3 and Yolov3Tiny
RUN wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names && \
wget https://pjreddie.com/media/files/yolov3.weights && \
wget https://pjreddie.com/media/files/yolov3-tiny.weights
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3.weights --output_graph yolo_v3.pb
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3-tiny.weights \
--output_graph yolo_v3_tiny.pb --tiny
# Download tracker networks
RUN $downloader --name person-detection-retail-0013 --output_dir $models/ir
RUN $downloader --name person-reidentification-retail-0031 --output_dir $models/ir
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Download Yolo v3
WORKDIR $models
RUN apt-get install git wget -y
RUN wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolov3.pb && \
wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolo_v3_new.json
# Optimizer on Yolov3
COPY ./yolo_v3.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_model yolov3.pb \
--transformations_config yolo_v3_new.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_model yolov3.pb \
--transformations_config yolo_v3_new.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3 \
--data_type FP16
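A quick way to confirm both conversions succeeded is to list the IR output directories; the Model Optimizer emits an .xml/.bin pair (plus a .mapping file) per precision:

ls $models/ir/yolo/FP32 $models/ir/yolo/FP16
# expected per directory: yolo_v3.xml  yolo_v3.bin  yolo_v3.mapping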
# Optimizer on Yolov3Tiny
COPY ./yolo_v3_tiny.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3_tiny \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3_tiny \
--data_type FP16
WORKDIR $models
# Download smallest maskrcnn
RUN wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
@@ -90,19 +67,17 @@ RUN tar -xzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
# Optimizer on maskrcnn
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--transformations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP32 \
--model_name mask_rcnn_inception_v2 \
--data_type FP32 --reverse_input_channels
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--transformations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP16 \
--model_name mask_rcnn_inception_v2 \
--data_type FP16 --reverse_input_channels
# Download tracker networks
RUN $downloader --name person-detection-retail-0013 --output_dir $models/ir
RUN $downloader --name person-reidentification-retail-0031 --output_dir $models/ir
CMD ["/bin/bash"]
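Phase2 builds on top of phase1 (see the FROM line above) and is the stage the demo image later copies models from. A hedged build invocation, run from the directory holding this Dockerfile and the yolo_v3*.json files it COPYs:

docker build -t local/dockervino:phase2 .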
[
{
"id": "TFYOLOV3",
"match_kind": "general",
"custom_attributes": {
"classes": 80,
"coords": 4,
"num": 9,
"mask": [0, 1, 2],
"jitter": 0.3,
"ignore_thresh": 0.7,
"truth_thresh": 1,
"random": 1,
"anchors":[10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59,119, 116,90, 156, 198, 373, 326],
"entry_points": ["detector/yolo-v3/Reshape", "detector/yolo-v3/Reshape_4", "detector/yolo-v3/Reshape_8"]
}
}
]
[
{
"id": "TFYOLOV3",
"match_kind": "general",
"custom_attributes": {
"classes": 80,
"coords": 4,
"num": 6,
"mask": [0, 1, 2],
"jitter": 0.3,
"ignore_thresh": 0.7,
"truth_thresh": 1,
"random": 1,
"anchors":[10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319],
"entry_points": ["detector/yolo-v3-tiny/Reshape", "detector/yolo-v3-tiny/Reshape_4"]
}
}
]
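The two configs differ only in the anchor set, num, and entry points: full YOLOv3 uses 9 anchors over three output scales, the tiny variant 6 over two. If a tiny IR were still being generated, the invocation would mirror the full-model mo_tf.py calls in the phase2 Dockerfile; the input file and output paths here are assumptions:

python3 $optimizer/mo_tf.py \
    --input_model yolov3-tiny.pb \
    --transformations_config yolo_v3_tiny.json \
    --input_shape [1,416,416,3] \
    --output_dir $models/ir/yolo/FP16 \
    --model_name yolo_v3_tiny \
    --data_type FP16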