Commit 715b8c61 authored by GONZALES ZUNIGA Juan Diego

Build reworked

-The demos are now built with the open_model_zoo repo fork
-Phase 2 is now within phase 1
-Phase 3 is moved to phase 2
-Phase 3 is the repo-building block
-Execute mode for the scripts is changed (100644 to 100755)
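
As a rough sketch, the resulting build chain looks like this (the per-phase directory names and the phase 3 tag are assumptions; the other tags match the FROM lines in the Dockerfiles below):

# phase 1: OpenVINO install + model optimizer prerequisites
docker build -t local/dockervino:phase1_R3 phase1/
# phase 2: download and convert the models
docker build -t local/dockervino:phase2_R3 phase2/
# phase 3: clone the open_model_zoo fork and build the demos
docker build -t local/dockervino:phase3_R3 phase3/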
parent d875b095
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
@@ -24,7 +24,7 @@ setupvars=/opt/intel/openvino/bin/setupvars.sh
examples_dir=/root/omz_demos_build/intel64/Release
models_dir=/root/openvino_models/ir
# Input: IPCam or USBCam
input=cam #input=http://94.214.173.241:8001/mjpg/video.mjpg
input=http://71.73.8.17:8080/cam_1.cgi #input=http://94.214.173.241:8001/mjpg/video.mjpg
#################################
######## Examples ###########
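A sketch of how the demo scripts consume these variables (the config filename, the model path, and the demo binary are assumptions based on the variables above and the models downloaded in phase 2):

source ./config                        # hypothetical name for the file edited above
source $setupvars
$examples_dir/human_pose_estimation_demo \
    -i $input \
    -m $models_dir/intel/human-pose-estimation-0001/FP32/human-pose-estimation-0001.xml \
    -d CPU
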
@@ -40,15 +40,16 @@ RUN tar xf $openvinoTar.tgz && cd $openvinoTar && \
RUN bash $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh
# build Inference Engine Samples
RUN $INSTALL_DIR/deployment_tools/inference_engine/samples/build_samples.sh
RUN $INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh
# RUN $INSTALL_DIR/deployment_tools/inference_engine/samples/build_samples.sh
# RUN $INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh
# install model_optimizer prerequisites; needs tf 1.15 for kontron
RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements.txt
#RUN sed -i 's/>=1.2.0/==1.5.0/g' $INSTALL_DIR/deployment_tools/model_optimizer/requirements_tf.txt
RUN $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh
# inti openvino env
# init openvino env
RUN echo "source $INSTALL_DIR/bin/setupvars.sh" >> /root/.bashrc
# check installation with example
RUN $INSTALL_DIR/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
CMD ["/bin/bash"]
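
A quick sanity check that the TensorFlow pin above actually lands in the image, as a sketch run against the phase 1 tag used by the next Dockerfile:

docker run --rm local/dockervino:phase1_R3 \
    python3 -c "import tensorflow as tf; print(tf.__version__)"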
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
# Compilation of examples for OpenVINO
# Compilation of demo_squeezenet_download_convert_run and security_barrier_camera
# Docker image after installation
# Downloads model ssd300 and creates yolov3
FROM local/dockervino:phase1_R3 as build
FROM local/dockervino:phase1_R3
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG INSTALL_DIR=/opt/intel/openvino
WORKDIR $INSTALL_DIR/deployment_tools/demo
#RUN sed -i 's/source "/"/g' $INSTALL_DIR/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
#RUN sed -i 's:"install_prerequisites.sh":"./install_prerequisites.sh":g' /opt/intel/openvino/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
#RUN sed -i 's:make -j8 classification_sample:make -j8:g' /opt/intel/openvino/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
RUN $INSTALL_DIR/deployment_tools/demo/demo_squeezenet_download_convert_run.sh
ARG downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ARG optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ARG models=/root/openvino_models
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
# Downloading Face Detection
RUN python3 $downloader --name face-detection-retail-0004 --output_dir $models/ir
# Downloading Age gender Recognition
RUN python3 $downloader --name age-gender-recognition-retail-0013 --output_dir $models/ir
# Downloading Emotion Recognition
RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $models/ir
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
WORKDIR $models
# Clone Yolo V3 tensorflow
RUN git clone https://github.com/mystic123/tensorflow-yolo-v3
WORKDIR $models/tensorflow-yolo-v3
# Download weights of Yolov3 and Yolov3Tiny
RUN wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names && \
wget https://pjreddie.com/media/files/yolov3.weights && \
wget https://pjreddie.com/media/files/yolov3-tiny.weights
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3.weights --output_graph yolo_v3.pb
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3-tiny.weights \
--output_graph yolo_v3_tiny.pb --tiny
# Optimizer on Yolov3
COPY ./yolo_v3.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3 \
--data_type FP16
# Optimizer on Yolov3Tiny
COPY ./yolo_v3_tiny.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3_tiny \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3_tiny \
--data_type FP16
WORKDIR $models
# Download smallest maskrcnn
RUN wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
RUN tar -xzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
# Optimizer on maskrcnn
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP32 \
--model_name mask_rcnn_inception_v2 \
--data_type FP32 --reverse_input_channels
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP16 \
--model_name mask_rcnn_inception_v2 \
--data_type FP16 --reverse_input_channels
# Download tracker networks
RUN python3 $downloader --name person-detection-retail-0013 --output_dir $models/ir
RUN python3 $downloader --name person-reidentification-retail-0031 --output_dir $models/ir
CMD ["/bin/bash"]
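
Once this image is built, the converted IRs under /root/openvino_models/ir can be fed to the demos compiled in the next phase; a sketch, assuming the OMZ async object detection demos and the build directory from the config above:

source /opt/intel/openvino/bin/setupvars.sh
# SSD300 on the IP camera stream from the config
/root/omz_demos_build/intel64/Release/object_detection_demo_ssd_async \
    -i http://71.73.8.17:8080/cam_1.cgi \
    -m /root/openvino_models/ir/ssd300/FP32/ssd300.xml -d CPU
# YOLOv3 on the same stream
/root/omz_demos_build/intel64/Release/object_detection_demo_yolov3_async \
    -i http://71.73.8.17:8080/cam_1.cgi \
    -m /root/openvino_models/ir/yolo/FP32/yolo_v3.xml -d CPU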
File mode changed from 100644 to 100755
# Docker image after installation
# Downloads model ssd300 and creates yolov3
# Compilation of examples for OpenVINO
# Compilation of demo_squeezenet_download_convert_run and security_barrier_camera
FROM local/dockervino:phase2_R3
FROM local/dockervino:phase2_R3 as build
MAINTAINER jdg:juan-diego.gonzales-zuniga@kontron.com
ARG INSTALL_DIR=/opt/intel/openvino
ARG downloader=$INSTALL_DIR/deployment_tools/tools/model_downloader/downloader.py
ARG optimizer=$INSTALL_DIR/deployment_tools/model_optimizer
ARG models=/root/openvino_models
# Downloading SSD Detection
RUN python3 $downloader --name ssd300 --output_dir $models
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP32 --model_name ssd300 --data_type FP32
RUN python3 $optimizer/mo.py \
--input_model $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel \
--input_proto $models/public/ssd300/models/VGGNet/VOC0712Plus/SSD_300x300_ft/deploy.prototxt \
--output_dir $models/ir/ssd300/FP16 --model_name ssd300 --data_type FP16
# Downloading Human Pose Detection
RUN python3 $downloader --name human-pose-estimation-0001 --output_dir $models/ir
# Downloading Face Detection
RUN python3 $downloader --name face-detection-retail-0004 --output_dir $models/ir
# Downloading Age gender Recognition
RUN python3 $downloader --name age-gender-recognition-retail-0013 --output_dir $models/ir
# Downloading Emotion Recognition
RUN python3 $downloader --name emotions-recognition-retail-0003 --output_dir $models/ir
# Downloading Head Pose Estimation
RUN python3 $downloader --name head-pose-estimation-adas-0001 --output_dir $models/ir
WORKDIR $models
# Clone Yolo V3 tensorflow
RUN git clone https://github.com/mystic123/tensorflow-yolo-v3
WORKDIR $models/tensorflow-yolo-v3
# Download weights of Yolov3 and Yolov3Tiny
RUN wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names && \
wget https://pjreddie.com/media/files/yolov3.weights && \
wget https://pjreddie.com/media/files/yolov3-tiny.weights
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3.weights --output_graph yolo_v3.pb
RUN python3 convert_weights_pb.py --class_names coco.names \
--data_format NHWC --weights_file yolov3-tiny.weights \
--output_graph yolo_v3_tiny.pb --tiny
# Optimizer on Yolov3
COPY ./yolo_v3.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3 \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3 \
--data_type FP16
# Optimizer on Yolov3Tiny
COPY ./yolo_v3_tiny.json $models/tensorflow-yolo-v3
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP32 \
--model_name yolo_v3_tiny \
--data_type FP32
RUN python3 $optimizer/mo_tf.py \
--input_model $models/tensorflow-yolo-v3/yolo_v3_tiny.pb \
--tensorflow_use_custom_operations_config $models/tensorflow-yolo-v3/yolo_v3_tiny.json \
--input_shape [1,416,416,3] \
--output_dir $models/ir/yolo/FP16 \
--model_name yolo_v3_tiny \
--data_type FP16
WORKDIR $models
# Download smallest maskrcnn
RUN wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
RUN tar -xzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
# Optimizer on maskrcnn
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP32 \
--model_name mask_rcnn_inception_v2 \
--data_type FP32 --reverse_input_channels
RUN python3 $optimizer/mo_tf.py \
--input_model $models/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb \
--tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config $models/mask_rcnn_inception_v2_coco_2018_01_28/pipeline.config \
--output_dir $models/ir/mask_rcnn/FP16 \
--model_name mask_rcnn_inception_v2 \
--data_type FP16 --reverse_input_channels
# Download tracker networks
RUN python3 $downloader --name person-detection-retail-0013 --output_dir $models/ir
RUN python3 $downloader --name person-reidentification-retail-0031 --output_dir $models/ir
WORKDIR $INSTALL_DIR/deployment_tools
RUN rm -rf open_model_zoo
# clone forked branch
RUN git clone https://github.com/Ukhupacha/open_model_zoo.git
# build Inference Engine Samples
RUN $INSTALL_DIR/deployment_tools/inference_engine/samples/build_samples.sh
# build demo samples
RUN $INSTALL_DIR/deployment_tools/inference_engine/demos/build_demos.sh
CMD ["/bin/bash"]
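
The tracker networks downloaded above pair with the pedestrian tracker demo built from the forked open_model_zoo; a sketch, assuming the downloader's intel/ output layout and the standard demo flags:

source /opt/intel/openvino/bin/setupvars.sh
/root/omz_demos_build/intel64/Release/pedestrian_tracker_demo \
    -i http://71.73.8.17:8080/cam_1.cgi \
    -m_det /root/openvino_models/ir/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml \
    -m_reid /root/openvino_models/ir/intel/person-reidentification-retail-0031/FP32/person-reidentification-retail-0031.xml \
    -d_det CPU -d_reid CPU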
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755