Commit 3ee34c4c authored by Ukhu's avatar Ukhu
Browse files

Update with HumanPose 3D

parent 74b69cd3
......@@ -3,6 +3,12 @@
FROM local/dockervino:phase2 as phase2
FROM local/dockervino:phase1 as phase1
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ENV tools=$INSTALL_DIR/deployment_tools
ENV downloader=$tools/tools/model_downloader/downloader.py
ENV optimizer=$tools/model_optimizer
ENV converter=$tools/tools/model_downloader/converter.py
ENV models=/root/openvino_models
# Copy models from phase2
COPY --from=phase2 /root/openvino_models/ir /root/openvino_models/ir
WORKDIR $INSTALL_DIR/deployment_tools
......
......@@ -23,7 +23,7 @@ rm -f ./xauthority ; cp $XAUTHORITY ./xauthority ;chmod 666 ./xauthority #else r
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
build_cmd=/opt/intel/openvino/deployment_tools/open_model_zoo/demos/build_demos.sh
build_cmd="/opt/intel/openvino/deployment_tools/open_model_zoo/demos/build_demos.sh -DENABLE_PYTHON=on"
# Input: IPCam or USBCam
input=/videos/test.mp4
#################################
......
......@@ -4,8 +4,7 @@ WORKDIR $INSTALL_DIR/deployment_tools
RUN apt install -y libmosquittopp-dev && \
rm -rf open_model_zoo && \
git clone https://github.com/Ukhupacha/open_model_zoo.git --branch kontron --depth 1 && \
$INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh && \
rm -rf $INSTALL_DIR/deployment_tools/open_model_zoo && \
$INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh -DENABLE_PYTHON=on && \
apt autoremove -y && rm -rf /var/lib/apt/lists/*
FROM scratch
COPY --from=phase1 / /
......
#!/bin/bash
# setupvars defined in runcmd (necessary for the same bash session)
source $setupvars
export PYTHONPATH="$PYTHONPATH:/root/omz_demos_build/intel64/Release/lib"
### If face detection then apply MYRIAD DEVICE
# app, device, model and input are defined in the runcmd file
echo "Device used: $device"
echo "Application used: $app"
echo "Model used:$model"
echo "Model used: $model"
if [ "$app" = "/root/omz_demos_build/intel64/Release/interactive_face_detection_demo" ]; then
......@@ -21,6 +21,6 @@ elif [ "$app" = "/root/omz_demos_build/intel64/Release/human_pose_estimation_dem
elif [ "$app" = "/root/omz_demos_build/intel64/Release/pedestrian_tracker_demo" ]; then
$app -i $input -d_det $device -d_reid $device -m_det $m_det -m_reid $m_reid -r $raw -broker $broker -client $client
else
$app -i $input -d $device -m $model
python3 $app -i $input -d $device -m $model
fi
#!/bin/bash
# Launch the local/dockervino:demo container with X11 access and pass the
# demo configuration (device, model paths, input stream) into its
# environment.  Run from a graphical session; from a plain shell the X
# setup commands are only echoed for documentation.
myimage=local/dockervino:demo
myname=dockervinodemo
#################################
#####  Display Parameters   #####
#################################
# If there is no X display, do not execute the X setup commands --
# prefix them with "echo" so the user sees what would be needed.
[ "k$DISPLAY" = k ] && doit=echo
# X11 display and access file (to use in the XAUTHORITY env variable)
# NOTE(review): "xhost +" disables X access control entirely; consider
# "xhost +local:" if only local containers need access.
$doit xhost +
# disable screen saver
$doit xset s 0
# force screen to use (0 is usually the physical display of the system, others would be Xvnc virtual screens)
display=$DISPLAY
# Copy the X authority file next to the script and make it world-readable,
# else the root user inside the container cannot open it.
rm -f ./xauthority ; cp $XAUTHORITY ./xauthority ; chmod 666 ./xauthority
#################################
#####  General Parameters  ######
#################################
# Init Openvino environment (sourced by the container entrypoint)
setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=/videos/test.mp4
# device: CPU or MYRIAD
device=CPU
######## CPU 3D Pose Estimation ###
# CPU inference uses the FP32 IR of the 3D human-pose model.
if [ "$device" == CPU ]; then
app=/opt/intel/openvino/deployment_tools/open_model_zoo/demos/python_demos/human_pose_estimation_3d_demo/human_pose_estimation_3d_demo.py
model=${models_dir}/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.xml
fi
###### NCS2 3D Pose Estimation #####
# MYRIAD (NCS2) inference requires the FP16 IR of the same model.
if [ "$device" == MYRIAD ]; then
app=/opt/intel/openvino/deployment_tools/open_model_zoo/demos/python_demos/human_pose_estimation_3d_demo/human_pose_estimation_3d_demo.py
model=${models_dir}/public/human-pose-estimation-3d-0001/FP16/human-pose-estimation-3d-0001.xml
fi
# Entrypoint is set once here; do not repeat --entrypoint below.
opts="--entrypoint=/entrypoint.sh"
# Running the container (traced with set -x so the full command is visible)
set -x
docker run $opts \
  -d \
  -ti \
  --mount type=bind,source="$(pwd)"/videos,target=/videos \
  --privileged \
  --net=host \
  --env="setupvars=$setupvars" \
  --env="DISPLAY=$display" \
  --env="input=$input" \
  --env="device=$device" \
  --env="app=$app" \
  --env="model=$model" \
  --env="m_det=$m_det" \
  --env="m_reid=$m_reid" \
  --env="broker=$broker" \
  --env="client=$client" \
  --env="raw=$raw" \
  --env="agM=$agM" \
  --env="emM=$emM" \
  --env="hpM=$hpM" \
  --device="/dev/video0:/dev/video0" \
  --volume="/dev:/dev" \
  --volume="/tmp/.X11-unix:/tmp/.X11-unix" \
  --env="PS1=$myname> " \
  --publish-all=true \
  --hostname=$myname \
  --name $myname \
  --cidfile=/tmp/monitcont.id \
  $myimage /bin/sh
# Recover the container id written by --cidfile, then clean the file up.
contid=$(cat /tmp/monitcont.id; rm -f /tmp/monitcont.id)
set +x
# $debug is normally unset so these run directly; set debug=echo to dry-run.
$debug docker logs $contid
# NOTE(review): the stray "$1" passes the script's first argument as an extra
# container to inspect -- looks like a leftover; confirm before removing.
$debug docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $1 $contid
......@@ -18,6 +18,7 @@ ARG DEPENDENCIES="autoconf \
libgtk-3-0 \
libtool \
python3-pip \
python3-dev \
udev \
unzip \
sudo \
......@@ -43,11 +44,12 @@ RUN tar xf $openvinoTar.tgz && cd $openvinoTar && \
# Install model_optimizer requisites, it needs tf 1.5 for ApolloLake and setuptools
RUN sed -i 's/<2.0.0/<=1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
RUN pip3 install setuptools
RUN pip3 install setuptools torch
RUN $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
# Init Openvino variables
RUN echo "source $INSTALL_DIR/bin/setupvars.sh" >> /root/.bashrc
RUN echo 'export PYTHONPATH="$PYTHONPATH:/root/omz_demos_build/intel64/Release/lib"' >> /root/.bashrc
# Check installation with benchmark demo
RUN $INSTALL_DIR/deployment_tools/demo/demo_benchmark_app.sh
CMD ["/bin/bash"]
......@@ -3,10 +3,13 @@
FROM local/dockervino:phase1
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ENV downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ENV optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ENV tools=$INSTALL_DIR/deployment_tools
ENV downloader=$tools/tools/model_downloader/downloader.py
ENV optimizer=$tools/model_optimizer
ENV converter=$tools/tools/model_downloader/converter.py
ENV models=/root/openvino_models
WORKDIR $models
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
......@@ -38,8 +41,11 @@ RUN python3 $optimizer/mo.py \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Downloading Human Pose Detection 3D
RUN python3 $downloader --list $tools/open_model_zoo/demos/python_demos/human_pose_estimation_3d_demo/models.lst
RUN python3 $converter --list $tools/open_model_zoo/demos/python_demos/human_pose_estimation_3d_demo/models.lst --o $models/ir --mo $optimizer/mo.py
# Download Yolo v3
WORKDIR $models
RUN apt-get install git wget -y
RUN wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolov3.pb && \
wget https://download.01.org/opencv/public_models/022020/yolo_v3/yolo_v3_new.json
......@@ -79,5 +85,5 @@ RUN python3 $optimizer/mo_tf.py \
--output_dir $models/ir/mask_rcnn/FP16 \
--model_name mask_rcnn_inception_v2 \
--data_type FP16 --reverse_input_channels
RUN echo 'export PYTHONPATH="$PYTHONPATH:/root/omz_demos_build/intel64/Release/lib"' >> ~/.bashrc
CMD ["/bin/bash"]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment