diff --git a/.ci_local_test/Jenkinsfile b/.ci_local_test/Jenkinsfile
index 8dcb84ac..2264f86e 100644
--- a/.ci_local_test/Jenkinsfile
+++ b/.ci_local_test/Jenkinsfile
@@ -3,28 +3,62 @@ pipeline {
environment {
// Test_Server is the local test machine.
Test_Server = "robotics-testNUC11"
- WORKSPACE_PATH = "/home/intel/ros2_openvino_toolkit"
+ Test_WORKSPACE = "/home/intel/ros2_openvino_toolkit_test"
}
stages {
- stage('Test Ros2 Galatic') {
+ stage('Check For Conflicts') {
steps {
script {
- def flag = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && docker images | grep ros2_openvino_test'", returnStatus: true
- if (flag == 0) {
- docker rmi -f ros2_openvino_test
- }
- def test_result = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && ./self_host_test_ros2_openvino.sh '", returnStatus: true
+ sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./check_conflict.sh'", returnStatus: true
+ echo "no conflict, the task continue"
+ }
+ }
+ }
+ stage('Get The Env') {
+ steps {
+ script {
+ // remove the old env file from the test machine
+ sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/env'", returnStatus: true
+ // export the current environment and copy it to the test machine
+ sh script: "export | tee -a env", returnStatus: true
+ sh script: "scp -r env intel@$Test_Server:$Test_WORKSPACE", returnStatus: true
+ }
+ }
+ }
+ stage('Move The Code To The Test Machine') {
+ steps {
+ script {
+ sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/ros2_openvino_toolkit'", returnStatus: true
+ sh script: "scp -r $WORKSPACE intel@$Test_Server:$Test_WORKSPACE/ros2_openvino_toolkit", returnStatus: true
+ // sh script: "ssh intel@$Test_Server 'docker cp $Test_WORKSPACE/ros2_openvino_toolkit:/root/catkin_ws/src'", returnStatus: true
+ }
+ }
+ }
+ stage('Klocwork Code Check') {
+ steps {
+ script {
+ echo 'klocwork code check'
+ sh script: "sudo docker cp $WORKSPACE klocwork_test:/home/intel/catkin_ws/src/ros2_openvino_toolkit", returnStatus: true
+ sh script: "sudo docker exec -i klocwork_test bash -c 'source ~/.bashrc && cd catkin_ws && ./klocwork_scan.sh'", returnStatus: true
+ }
+
+ }
+ }
+ stage('Run The ros2_openvino Container') {
+ steps {
+ script {
+ def test_result = sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./self_container_ros2_openvino_test.sh '", returnStatus: true
if (test_result == 0) {
echo "test pass"
} else {
echo "test fail"
error "test fail"
}
-
+
}
+
}
}
-
}
}
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh
new file mode 100755
index 00000000..91a19139
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+export DISPLAY=:0
+
+export work_dir=$PWD
+
+
+function run_container() {
+
+ docker images | grep ros2_openvino_docker
+
+ if [ $? -eq 0 ]
+ then
+ echo "the image of ros2_openvino_docker:01 existence"
+ docker rmi -f ros2_openvino_docker:01
+ fi
+
+ docker ps -a | grep ros2_openvino_container
+ if [ $? -eq 0 ]
+ then
+ docker rm -f ros2_openvino_container
+ fi
+
+ # Drop the git clone step from the Dockerfile:
+ # use the Jenkins workspace copy of ros2_openvino_toolkit instead of freshly cloned code.
+ cd $work_dir && sed -i '/RUN git clone -b ros2/d' Dockerfile
+ # Add the test jpg images to the Dockerfile.
+ cd $work_dir && sed -i '$i COPY jpg /root/jpg' Dockerfile
+
+ cd $work_dir && docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic -t ros2_openvino_docker:01 .
+ cd $work_dir && docker images
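+ # Run the test container: mount the toolkit source and the test cases into it,
+ # then execute the whole test suite inside the container.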
+ docker run -i --privileged=true --device=/dev/dri -v $work_dir/ros2_openvino_toolkit:/root/catkin_ws/src/ros2_openvino_toolkit -v $HOME/.Xauthority:/root/.Xauthority -e GDK_SCALE -v $work_dir/test_cases:/root/test_cases --name ros2_openvino_container ros2_openvino_docker:01 bash -c "cd /root/test_cases && ./run.sh galactic"
+
+}
+
+run_container
+if [ $? -ne 0 ]
+then
+ echo "Test fail"
+ exit -1
+fi
+
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg
new file mode 100644
index 00000000..f53b0339
Binary files /dev/null and b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg differ
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh
new file mode 100755
index 00000000..0efee6ce
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
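+# Select the ROS2 distro to source; defaults to galactic when no argument is given.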
+
+if [[ $1 == '' ]]
+then
+ export ros2_branch=galactic
+else
+ export ros2_branch=$1
+fi
+
+export dynamic_vino_sample=/root/catkin_ws/install/openvino_node/share/openvino_node
+
+
+source /opt/ros/$ros2_branch/setup.bash
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh
new file mode 100755
index 00000000..e2678f36
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+mkdir -p /opt/openvino_toolkit/models
+#apt install -y python-pip
+apt install -y python3.8-venv
+cd ~ && python3 -m venv openvino_env && source openvino_env/bin/activate
+python -m pip install --upgrade pip
+pip install openvino-dev[tensorflow2,onnx]==2022.3
+
+
+#Download the optimized Intermediate Representation (IR) of model (execute once)
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+
+
+#Copy label files (execute once)
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+
+mkdir -p /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/
+cp /opt/openvino_toolkit/models/convert/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/* /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/
+
+cd /root/test_cases/ && ./yolov5_model_download.sh
+cd /root/test_cases/ && ./yolov8_model_download.sh
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh
new file mode 100755
index 00000000..d13dd828
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
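+# Build the workspace, install the test dependencies, download the models,
+# then run the unittest suite and propagate its result.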
+if [[ $1 == '' ]]
+then
+    export ros2_branch=galactic
+else
+    export ros2_branch=$1
+fi
+source /root/test_cases/config.sh $ros2_branch
+
+cd /root/catkin_ws && colcon build --symlink-install
+cd /root/catkin_ws && source ./install/local_setup.bash
+
+apt-get update
+# apt-get install -y ros-$ros2_branch-diagnostic-updater
+apt-get install -y python3-defusedxml
+apt-get install -y python3-pip
+pip3 install XTestRunner==1.5.0
+
+cd /root/test_cases && ./ros2_openvino_tool_model_download.sh
+mkdir -p /root/test_cases/log
+echo "===cat pipeline_people_ci.yaml"
+cat /root/catkin_ws/install/openvino_node/share/openvino_node/param/pipeline_people_ci.yaml
+
+cd /root/test_cases/unittest && python3 run_all.py
+result=$?
+#echo "cat segmentation maskrcnn"
+#cat /root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log
+
+echo "Test ENV:" && df -h && free -g
+if [ $result -ne 0 ]
+then
+ exit -1
+fi
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py
new file mode 100755
index 00000000..09e05d3f
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+import unittest
+from test_cases import Test_Cases
+from XTestRunner import HTMLTestRunner
+
+def main():
+
+ suite = unittest.TestSuite()
+
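+ # Cases are added explicitly so they run in a fixed order;
+ # test_10_pipeline_segmentation_instance_ci is currently excluded (see below).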
+ all_cases = [Test_Cases('test_1_pipeline_people_ci'),
+ Test_Cases('test_2_pipeline_reidentification_ci'),
+ Test_Cases('test_3_pipeline_image_ci'),
+ Test_Cases('test_4_pipeline_segmentation_ci'),
+ Test_Cases('test_5_pipeline_vehicle_detection_ci'),
+ Test_Cases('test_6_pipeline_person_attributes_ci'),
+ Test_Cases('test_7_pipeline_segmentation_image_ci'),
+ Test_Cases('test_8_pipeline_object_yolov5_ci'),
+ Test_Cases('test_9_pipeline_object_yolov8_ci')]
+ #Test_Cases('test_10_pipeline_segmentation_instance_ci')]
+ suite.addTests(all_cases)
+
+ with open('./result.html', 'wb') as fp:
+ runner = HTMLTestRunner(
+ stream=fp,
+ title='ROS2 Openvino Test Report',
+ description='Test ROS2-galactic openvino all cases',
+ language='en',
+ )
+ result = runner.run(
+ testlist=suite,
+ rerun=1,
+ save_last_run=False
+ )
+
+ failure_count = len(all_cases) - result.success_count
+ print(f"all count: {len(all_cases)}")
+ print(f"success count: {result.success_count}")
+ print(f"failure count: {failure_count}")
+ if result.success_count == len(all_cases) and failure_count == 0:
+ print(f"Test ALL PASS")
+ else:
+ print(f"Test FAIL")
+ exit(-1)
+
+if __name__=="__main__":
+ main()
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py
new file mode 100755
index 00000000..a9bbb34b
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py
@@ -0,0 +1,121 @@
+from time import sleep
+import unittest
+import subprocess
+import os
+
+class Test_Cases(unittest.TestCase):
+
+ def test_pipeline(self, launch_file, log_file, topic_list=['/rosout']):
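+ # Helper used by the numbered test cases below: launch the given pipeline,
+ # echo each expected topic into its own log, kill the ros2 processes,
+ # then assert on the pipeline log and every topic log.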
+ print(f"{log_file} topic_list", topic_list)
+ subprocess.Popen([f"ros2 launch openvino_node {launch_file} > {log_file} &"], shell=True)
+ for topic in topic_list:
+ name=topic.split('/', -1)[-1]
+ sleep(3)
+ print(f"{topic} {name}.log")
+ subprocess.Popen([f"ros2 topic echo {topic} > {name}.log &"], shell=True)
+ if name == "segmented_obejcts":
+ subprocess.Popen([f"ros2 topic echo {topic} >> {name}.log &"], shell=True)
+ kill_ros2_process()
+ print(f"kill the test process done")
+ with open(f"{log_file}") as handle:
+ log = handle.read()
+ check_log = log.split("user interrupted with ctrl-c (SIGINT)")[0]
+ self.assertIn('One Pipeline Created!', check_log)
+ self.assertNotIn('ERROR', check_log)
+ for topic in topic_list:
+ name = topic.split('/', -1)[-1]
+ with open(f"{name}.log") as topic_handle:
+ topic_info = topic_handle.read()
+ if "header" not in topic_info:
+ print(f"the {launch_file} topic {name} failed")
+ else:
+ print(f"the {launch_file} topic {name} pass")
+ self.assertIn("header", topic_info)
+ print(f"check all done")
+
+
+ def test_1_pipeline_people_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/age_genders_Recognition", \
+ "/ros2_openvino_toolkit/headposes_estimation", \
+ "/ros2_openvino_toolkit/face_detection", \
+ "/ros2_openvino_toolkit/emotions_recognition"]
+ launch_file = f"pipeline_people_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_people_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_2_pipeline_reidentification_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/reidentified_persons",]
+ launch_file = f"pipeline_reidentification_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_reidentification_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_3_pipeline_image_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/emotions_recognition", \
+ "/ros2_openvino_toolkit/headposes_estimation", \
+ "/ros2_openvino_toolkit/people/age_genders_Recognition"]
+ launch_file = f"pipeline_image_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_image_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_4_pipeline_segmentation_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+ launch_file = f"pipeline_segmentation_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_segmentation_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_5_pipeline_vehicle_detection_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/detected_license_plates",
+ "/ros2_openvino_toolkit/detected_vehicles_attribs"]
+ launch_file = f"pipeline_vehicle_detection_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_vehicle_detection_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_6_pipeline_person_attributes_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/detected_objects", \
+ "/ros2_openvino_toolkit/person_attributes"]
+ launch_file = f"pipeline_person_attributes_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_person_attributes_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_7_pipeline_segmentation_image_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+ launch_file = f"pipeline_segmentation_image_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_segmentation_image_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_8_pipeline_object_yolov5_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/detected_objects"]
+ launch_file = f"pipeline_object_yolov5_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_object_yolov5_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_9_pipeline_object_yolov8_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/detected_objects"]
+ launch_file = f"pipeline_object_yolov8_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_object_yolov8_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+ def test_10_pipeline_segmentation_instance_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+ launch_file = f"pipeline_segmentation_instance_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_segmentation_instance.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+
+ @unittest.skip("skip case")
+ def test_9_pipeline_segmentation_maskrcnn_ci(self):
+ topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+ launch_file = f"pipeline_segmentation_maskrcnn_ci_test.py"
+ log_file = f"/root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log"
+ self.test_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+
+def kill_ros2_process(sleep_z=30):
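+ # Give the pipeline sleep_z seconds to produce output, then kill every
+ # ros2 process found in the process table.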
+ sleep(sleep_z)
+ process_result = subprocess.Popen(["ps -ef | grep ros2 | grep -v 'grep' | awk '{print $2}'"],stdout=subprocess.PIPE, shell=True).communicate()
+ print(process_result[0].decode('utf-8').replace('\n', ' '))
+ kill_process = 'kill -9 ' + process_result[0].decode('utf-8').replace('\n', ' ')
+ subprocess.Popen([kill_process], shell=True).communicate()
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh
new file mode 100755
index 00000000..f3e50d3b
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+#1. Copy YOLOv5 Repository from GitHub
+cd /root && git clone https://github.com/ultralytics/yolov5.git
+
+#Set Environment for Installing YOLOv5
+
+cd yolov5
+python3 -m venv yolo_env # Create a virtual python environment
+source yolo_env/bin/activate # Activate environment
+pip install -r requirements.txt # Install yolov5 prerequisites
+pip install wheel
+pip install onnx
+
+# Download PyTorch Weights
+mkdir -p /root/yolov5/model_convert && cd /root/yolov5/model_convert
+wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt
+
+cd /root/yolov5
+python3 export.py --weights model_convert/yolov5n.pt --include onnx
+
+
+#2. Convert ONNX files to IR files
+cd /root/yolov5/
+python3 -m venv ov_env # Create openVINO virtual environment
+source ov_env/bin/activate # Activate environment
+python -m pip install --upgrade pip # Upgrade pip
+pip install openvino[onnx]==2022.3.0 # Install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0 # Install OpenVINO Dev Tool for ONNX
+
+
+cd /root/yolov5/model_convert
+mo --input_model yolov5n.onnx
+
+
+mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+# The script already runs as root inside the container, so plain cp is enough.
+cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh
new file mode 100755
index 00000000..a3879291
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#Pip install the ultralytics package including all requirements in a Python>=3.7 environment with PyTorch>=1.7.
+
+mkdir -p yolov8 && cd yolov8
+apt install -y python3.8-venv
+python3 -m venv openvino_env
+source openvino_env/bin/activate
+# Install ultralytics inside the venv so the yolo CLI below runs from it.
+pip install ultralytics
+
+
+#Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
+# export official model
+yolo export model=yolov8n.pt format=openvino
+yolo export model=yolov8n-seg.pt format=openvino
+
+
+# Move to the Recommended Model Path
+mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n-seg
+
+cp yolov8n_openvino_model/* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+cp yolov8n-seg_openvino_model/* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n-seg
+
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..aa8a2aff
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,65 @@
+---
+BasedOnStyle: Google
+AccessModifierOffset: -2
+ConstructorInitializerIndentWidth: 2
+AlignEscapedNewlinesLeft: false
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AlwaysBreakTemplateDeclarations: true
+AlwaysBreakBeforeMultilineStrings: true
+BreakBeforeBinaryOperators: false
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: true
+BinPackParameters: true
+ColumnLimit: 120
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+DerivePointerBinding: false
+PointerBindsToType: true
+ExperimentalAutoDetectBinPacking: false
+IndentCaseLabels: true
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 60
+PenaltyBreakString: 1
+PenaltyBreakFirstLessLess: 1000
+PenaltyExcessCharacter: 1000
+PenaltyReturnTypeOnItsOwnLine: 90
+SpacesBeforeTrailingComments: 2
+Cpp11BracedListStyle: false
+Standard: Auto
+IndentWidth: 2
+TabWidth: 2
+UseTab: Never
+IndentFunctionDeclarationAfterType: false
+SpacesInParentheses: false
+SpacesInAngles: false
+SpaceInEmptyParentheses: false
+SpacesInCStyleCastParentheses: false
+SpaceAfterControlStatementKeyword: true
+SpaceBeforeAssignmentOperators: true
+ContinuationIndentWidth: 4
+SortIncludes: false
+SpaceAfterCStyleCast: false
+
+# Configure each individual brace in BraceWrapping
+BreakBeforeBraces: Custom
+
+# Control of individual brace wrapping cases
+BraceWrapping: {
+  AfterClass: 'true',
+  AfterControlStatement: 'false',
+  AfterEnum: 'true',
+  AfterFunction: 'true',
+  AfterNamespace: 'true',
+  AfterStruct: 'true',
+  AfterUnion: 'true',
+  BeforeCatch: 'false',
+  BeforeElse: 'false',
+  IndentBraces: 'false'
+}
+...
diff --git a/.github/workflows/basic_func_tests.yml b/.github/workflows/basic_func_tests.yml
new file mode 100644
index 00000000..3973960c
--- /dev/null
+++ b/.github/workflows/basic_func_tests.yml
@@ -0,0 +1,52 @@
+# This is a basic workflow to help you get started with Actions
+
+name: Basic_Func_CI
+
+# Controls when the workflow will run
+on:
+ # Triggers the workflow on push or pull request events but only for the "master" branch
+ push:
+ branches: [ "master", "ros2" ]
+ pull_request:
+ branches: [ "master", "ros2" ]
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# Set default top-level permissions, no write permission is granted at top-level.
+permissions: read-all
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+ # Remove old artifacts
+ remove-old-artifacts:
+ runs-on: ubuntu-20.04
+ timeout-minutes: 10
+ steps:
+ - name: Remove old artifacts
+ uses: c-hive/gha-remove-artifacts@v1
+ with:
+ age: '15 days'
+ # This workflow contains a single job called "build"
+ build:
+ # The type of runner that the job will run on
+ runs-on: ubuntu-20.04
+ # Steps represent a sequence of tasks that will be executed as part of the job
+ steps:
+ # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+ - uses: actions/checkout@v3
+ # Runs a set of commands using the runner's shell
+ - name: ros2_openvino_toolkit_test
+ run: |
+ df -h
+ sudo docker rmi $(docker image ls -aq) || true
+ sudo swapoff /swapfile || true
+ sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc || true
+ mkdir -p ../workspace
+ cp -r ${GITHUB_WORKSPACE}/.ci_local_test/ros2_openvino_toolkit_test ../workspace
+ cp -r ${GITHUB_WORKSPACE} ../workspace/ros2_openvino_toolkit_test
+ ls ${GITHUB_WORKSPACE}/docker/Dockerfile
+ cp ${GITHUB_WORKSPACE}/docker/Dockerfile ../workspace/ros2_openvino_toolkit_test
+ ls ../workspace/ros2_openvino_toolkit_test/Dockerfile
+ cd ../workspace/ros2_openvino_toolkit_test && ./docker_run.sh
+
diff --git a/.github/workflows/code_format.yml b/.github/workflows/code_format.yml
new file mode 100644
index 00000000..b0fb096d
--- /dev/null
+++ b/.github/workflows/code_format.yml
@@ -0,0 +1,43 @@
+
+name: Code_Format_Check
+
+# Controls when the workflow will run
+on:
+ # Triggers the workflow on push or pull request events but only for the "master" branch
+ push:
+ branches: [ "master", "ros2" ]
+ pull_request:
+ branches: [ "master", "ros2" ]
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# Set default top-level permissions, no write permission is granted at top-level.
+permissions: read-all
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+ # Remove old artifacts
+ remove-old-artifacts:
+ runs-on: ubuntu-22.04
+ timeout-minutes: 10
+ steps:
+ - name: Remove old artifacts
+ uses: c-hive/gha-remove-artifacts@v1
+ with:
+ age: '15 days'
+ pre-commit:
+ # The type of runner that the job will run on
+ runs-on: ubuntu-22.04
+ # Steps represent a sequence of tasks that will be executed as part of the job
+ steps:
+ # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+ - uses: actions/checkout@v3
+ # Runs a set of commands using the runner's shell
+ - name: code_format_check
+ run: |
+ sudo apt-get install clang-format -y
+ find . -name '*.h' -or -name '*.hpp' -or -name '*.cpp' | xargs clang-format -i -style=file
+ git diff --exit-code
+
+
diff --git a/.github/workflows/dev-ov_2020-3.yml b/.github/workflows/dev-ov_2020-3.yml
index b6c82595..6f946fda 100644
--- a/.github/workflows/dev-ov_2020-3.yml
+++ b/.github/workflows/dev-ov_2020-3.yml
@@ -8,6 +8,9 @@ on:
pull_request:
branches: [ dev-ov.2020.3 ]
+# Set default top-level permissions, no write permission is granted at top-level.
+permissions: read-all
+
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
diff --git a/.github/workflows/dev-ov_2021-3.yml b/.github/workflows/dev-ov_2021-3.yml
index b7a708fc..145ad66d 100644
--- a/.github/workflows/dev-ov_2021-3.yml
+++ b/.github/workflows/dev-ov_2021-3.yml
@@ -8,6 +8,9 @@ on:
pull_request:
branches: [ dev-ov.2021.3 ]
+# Set default top-level permissions, no write permission is granted at top-level.
+permissions: read-all
+
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
diff --git a/README.md b/README.md
index 26f481e0..869fa51c 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,280 @@
# ros2_openvino_toolkit
-ROS2 Version supported:
+# Table of Contents
+* [➤ Overview](#overview)
+ * [ROS2 Version Supported](#ros2-version-supported)
+ * [Inference Features Supported](#inference-features-supported)
+* [➤ Prerequisite](#prerequisite)
+* [➤ Introduction](#introduction)
+ * [Design Architecture](#design-architecture)
+ * [Logic Flow](#logic-flow)
+* [➤ Supported Features](#supported-features)
+ * [Multiple Input Components](#multiple-input-components)
+ * [Inference Implementations](#inference-implementations)
+ * [ROS Interfaces and Outputs](#ros-interfaces-and-outputs)
+ * [Demo Result Snapshots](#demo-result-snapshots)
+* [➤ Installation & Launching](#installation-and-launching)
+ * [Deploy in Local Environment](#deploy-in-local-environment)
+ * [Deploy in Docker](#deploy-in-docker)
+* [➤ Reference](#reference)
+* [➤ FAQ](#faq)
+* [➤ Feedback](#feedback)
+* [➤ More Information](#more-information)
-* [x] ROS2 Dashing
-* [x] ROS2 Eloquent
-* [x] ROS2 Foxy
-* [x] ROS2 Galactic
+# Overview
+## ROS2 Version Supported
-Inference Features supported:
+|Branch Name|ROS2 Version Supported|OpenVINO Version|OS Version|
+|-----------------------|-----------------------|--------------------------------|----------------------|
+|[ros2](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Galactic, Foxy, Humble|V2022.1, V2022.2, V2022.3|Ubuntu 20.04, Ubuntu 22.04|
+|[dashing](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Dashing|V2022.1, V2022.2, V2022.3|Ubuntu 18.04|
+|[foxy-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Foxy|V2021.4|Ubuntu 20.04|
+|[galactic-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Galactic|V2021.4|Ubuntu 20.04|
+## Inference Features Supported
* [x] Object Detection
* [x] Face Detection
-* [x] Age-Gender Recognition
+* [x] Age Gender Recognition
* [x] Emotion Recognition
* [x] Head Pose Estimation
-* [x] Object Segmentation
+* [x] Object Segmentation (Semantic & Instance)
* [x] Person Re-Identification
* [x] Vehicle Attribute Detection
* [x] Vehicle License Plate Detection
-## Introduction
+# Prerequisite
-The OpenVINO™ (Open visual inference and neural network optimization) toolkit provides a ROS-adaptered runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance.
+|Prerequisite|Mandatory?|Description|
+|-----------------------|-----------------------|--------------------------------|
+|**Processor**|Mandatory|A platform with an Intel processor. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of supported Intel processors.)|
+|**OS**|Mandatory|This project has only been tested on Ubuntu distros. It is recommended to install the Ubuntu version corresponding to the ROS distro that you select. **For example: Ubuntu 18.04 for Dashing, Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.**|
+|**ROS2**|Mandatory|Active ROS2 distros are supported (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You may find the corresponding branch in the table above in section [**ROS2 Version Supported**](#ros2-version-supported).|
+|**OpenVINO**|Mandatory|The OpenVINO toolkit version is determined by the OS and ROS2 distro you use. See the table above in section [**ROS2 Version Supported**](#ros2-version-supported).|
+|**RealSense Camera**|Optional|A RealSense camera is optional; you may choose one of these alternatives as the input: standard camera, ROS image topic, video/image file or RTSP camera.|
+# Introduction
+## Design Architecture
+
Architecture Design
+From the view of hierarchical architecture design, the package is divided into different functional components, as shown in the picture below.
+
+
+
+
+
+Intel® OpenVINO™ toolkit
+
+- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework for neural networks, which quickly deploys applications and solutions for vision inference. By leveraging the Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance.
+ - Increase deep learning workload performance up to 19x with computer vision accelerators from Intel.
+ - Unleash convolutional neural network (CNN)-based deep learning inference using a common API.
+ - Speed development using optimized OpenCV* and OpenVX* functions.
See more from [here](https://github.com/openvinotoolkit/openvino) for Intel OpenVINO™ introduction.
+
+
+
+
+
+ROS OpenVINO Runtime Framework
+
+- **ROS OpenVINO Runtime Framework** is the main body of this repo. It provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to simplify launching, configuration, data analysis and re-use.
+
+
+
+
+
+ROS Input & Output
+
+- **Diverse input resources** are the data sources to be inferred and analyzed with the OpenVINO framework.
+- **ROS interfaces and outputs** currently include _Topic_ and _Service_. Natively, RViz output and CV image window output are also supported by refactoring the topic messages and inference results.
+
+
+
+
+
+Optimized Models
+
+- **Optimized Models** are provided by the Model Optimizer component of the Intel® OpenVINO™ toolkit. It imports trained models from various frameworks (Caffe*, TensorFlow*, MXNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, batch normalization elimination and quantization, and supports graph freezing and graph summarizing along with dynamic input freezing.
+
+
+
+
+## Logic Flow
+ Logic Flow
+From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The following picture depicts how these entities work together when the corresponding program is launched.
+
+
+
+Once a program is launched with a specified .yaml config file, passed in the .launch file or via the command line, the _**parameter manager**_ analyzes the configuration of the pipeline and the whole framework, then shares the parsed configuration information with the pipeline procedure. A _**pipeline instance**_ is created following the configuration info and is added to the _**pipeline manager**_ for lifecycle control and inference action triggering.
+
+The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for how to create or edit the config files.
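+
+A minimal way to exercise this flow end to end is to launch one of the sample pipelines from the command line; a sketch, using one of the CI launch files added under `.ci_local_test` (any other sample launch file works the same way):
+
+```bash
+# The pipeline's input, inferences and outputs are all driven by the yaml
+# config installed with the openvino_node package.
+ros2 launch openvino_node pipeline_people_ci_test.py
+```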
+
+
+
+Pipeline
+
+**Pipeline** fulfills the whole data handling process: initializing the input component for image data gathering and formatting; building up the structured inference network and passing the formatted data through it; transferring the inference results and handling output, etc.
+
+
+
+
+
+Pipeline manager
+
+**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, a system exception, resource limitation, or an end user's operation). Because it cooperates with resource management and is aware of the whole framework, it can optimize performance by sharing system resources between pipelines and reducing the burden of data copying.
+
+
+
+
+# Supported Features
+## Multiple Input Components
+Currently, the package supports several input resources for acquiring image data, as listed in the following table:
+
+
+
+Input Resource Table
+
+|Input Resource|Description|
+|--------------------|------------------------------------------------------------------|
+|StandardCamera|Any RGB camera with a USB port. Currently only the first USB camera is used if several are connected.|
+|RealSenseCamera| Intel RealSense RGB-D camera, called directly via the librealsense plugin of OpenCV.|
+|ImageTopic| Any ROS topic which is structured as an image message.|
+|Image| Any image file which can be parsed by OpenCV, such as .png, .jpeg.|
+|Video| Any video file which can be parsed by OpenCV.|
+|IpCamera| Any RTSP server which can push a video stream.|
+
+
-## Prerequisite
+## Inference Implementations
+Currently, the correspondence between supported inference features, the models used and the yaml configurations is listed as follows:
-* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2021-4-lts-relnotes.html) for the full list of Intel processors supported.)
-* OS: Ubuntu 20.04
-* ROS2: Galactic Geochelone
-* OpenVINO: V2021.4, see [the release notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html) for more info.
-* [Optional] RealSense D400 Series Camera
-* [Optional] Intel NCS2 Stick
-## Tables of contents
-* [Design Architecture and Logic Flow](./doc/tables_of_contents/Design_Architecture_and_logic_flow.md)
-* [Supported Features](./doc/tables_of_contents/supported_features/Supported_features.md)
-* Tutorials
- - [How to configure a inference pipeline?](./doc/tables_of_contents/tutorials/configuration_file_customization.md)
- - [How to create multiple pipelines in a process?](./doc/tables_of_contents/tutorials/Multiple_Pipelines.md)
+
+
+Inference Feature Correspondence Table
-## Installation & Launching
-See Getting Start Pages for [ROS2 Dashing](./doc/getting_started_with_Dashing.md) or [ROS2 Foxy](./doc/getting_started_with_Foxy_Ubuntu20.04.md) or [ROS2 Galactic](./doc/getting_started_with_Galactic_Ubuntu20.04.md) for detailed installation & lauching instructions.
+|Inference|Description|YAML Configuration|Model Used|
+|-----------------------|------------------------------------------------------------------|----------------------|----------------------|
+|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)|
+|Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)|
+|Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)|
+|Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)|
+|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mobilenet-ssd)
[yolov5](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/111-yolov5-quantization-migration)
[yolov7](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/226-yolov7-optimization)
[yolov8](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/230-yolov8-optimization)|
+|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/license-plate-recognition-barrier-0001)|
+|Object Segmentation - Semantic| Semantic segmentation, assigning a class label to each pixel in an image. |[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/deeplabv3)|
+| Object Segmentation - Instance | Instance segmentation, a combination of semantic segmentation & object detection. | [pipeline_segmentation_instance.launch.yaml](./sample/param/pipeline_segmentation_instance.yaml) | [yolov8-seg](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/230-yolov8-optimization)
[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)|
+|Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)|
+|Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-reidentification-retail-0277)|
+|Object Segmentation Maskrcnn| Object segmentation and detection based on the maskrcnn model. [_Deprecated: it is recommended to use `object segmentation - instance` for a first try._]|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)|
+
+
+
+## ROS interfaces and outputs
+The inference results can be output in several types. One or more types can be enabled for any inference pipeline.
+### Topic
+Specific topic(s) can be generated and published according to the given inference functionalities.
+
+
+
+Published Topic Correspondence Table
+
+|Inference|Published Topic|
+|---|---|
+|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
+|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([object_msgs:msg:EmotionsStamped](../../../object_msgs/msg/EmotionsStamped.msg))|
+|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([object_msgs:msg:AgeGenderStamped](../../../object_msgs/msg/AgeGenderStamped.msg))|
+|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([object_msgs:msg:HeadPoseStamped](../../../object_msgs/msg/HeadPoseStamped.msg))|
+|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
+|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))|
+|Object Segmentation Maskrcnn|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))|
+|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([object_msgs::msg::ReidentificationStamped](../../../object_msgs/msg/ReidentificationStamped.msg))|
+|Vehicle Detection|```/ros2_openvino_toolkit/detected_vehicles_attribs```([object_msgs::msg::VehicleAttribsStamped](../../../object_msgs/msg/VehicleAttribsStamped.msg))|
+|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([object_msgs::msg::LicensePlateStamped](../../../object_msgs/msg/LicensePlateStamped.msg))|
+
+
+
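+A quick way to verify that a pipeline is publishing, mirroring what the CI test cases in this repo do, is to echo one of the topics above:
+
+```bash
+# Print incoming detection results; any topic from the table works the same way.
+ros2 topic echo /ros2_openvino_toolkit/detected_objects
+```
+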
+### Service
+Several ROS2 services are created, intended to be used in client/server mode, especially for synchronously getting inference results for a given image frame or for managing an inference pipeline's lifecycle.
+
+
+
+Service Correspondence Table
+
+|Inference|Service|
+|---|---|
+|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
+|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
+|Age Gender Detection Service|```/detect_age_gender```([object_msgs::srv::AgeGender](./object_msgs/srv/AgeGenderSrv.srv))|
+|Headpose Detection Service|```/detect_head_pose```([object_msgs::srv::HeadPose](./object_msgs/srv/HeadPoseSrv.srv))|
+|Emotion Detection Service|```/detect_emotion```([object_msgs::srv::Emotion](./object_msgs/srv/EmotionSrv.srv))|
+
+
+
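+Before calling a service, its request/response layout can be inspected from the command line; a minimal sketch, assuming the object_msgs package is built and sourced in the current shell:
+
+```bash
+# Show the request/response definition of the object detection service type.
+ros2 interface show object_msgs/srv/DetectObject
+```
+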
+### RViz
+RViz display is also supported via a composited topic that combines the original image frame with the inference results.
+To show it in the RViz tool, add an image view with the composited topic:
+```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html))
+
+### Image Window
+An OpenCV-based image window is natively supported by the package.
+To enable the window, Image Window output should be added to the output choices in the .yaml config file. Refer to [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for more information about checking/adding this feature to your launch.
+
+## Demo Result Snapshots
+Demo Snapshots
+For snapshots of the demo results, refer to the following pictures.
+
+* Face detection input from standard camera
+
+
+* Object detection input from RealSense camera
+
+
+* Object segmentation input from video
+
+
+* Person reidentification input from standard camera
+
+
+
+# Installation and Launching
+## Deploy in Local Environment
+* Refer to the quick start document [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & launching instructions.
+* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+
+## Deploy in Docker
+* Refer to the docker instructions in [docker_instructions](./docker/docker_instructions_ov2.0.md) for detailed information about building the docker image and launching.
+* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+
+# Reference
+* Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3) for detailed model structure and demo samples.
+* OpenVINO API 2.0: Refer to the OpenVINO document [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for the latest API 2.0 transition guide.
+
+# FAQ
+* How to get the IR file for [yolov5](./doc/quick_start/tutorial_for_yolov5_converted.md) | [yolov7](./doc/quick_start/tutorial_for_yolov7_converted.md) | [yolov8](./doc/quick_start/tutorial_for_yolov8_converted.md) ?
+* [How to build OpenVINO by source?](https://github.com/openvinotoolkit/openvino/wiki#how-to-build)
+* [How to build RealSense by source?](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)
+* [What is the basic command of Docker CLI?](https://docs.docker.com/engine/reference/commandline/docker/)
+* [What is the canonical C++ API for interacting with ROS?](https://docs.ros2.org/latest/api/rclcpp/)
+ How to change the logging level?
+ This project provides two logging levels: *DEBUG* & *INFO*.
+ Follow these steps to change the logging level:
+
+ - Update ./openvino_wrapper_lib/CMakeLists.txt by uncommenting (for DEBUG level) or commenting (for INFO level) this line:
+ ```code
+ #add_definitions(-DLOG_LEVEL_DEBUG)
+ ```
+ - Rebuild project
+ Refer to the corresponding quick-start documents to rebuild this project, e.g.:
+ ```code
+ source /opt/ros/<ROS_DISTRO>/setup.bash
+ colcon build --symlink-install
+ ```
+ - Launch the OpenVINO node
+ You will see that the logging level has changed.
+
+
+# Feedback
+* Report questions, issues and suggestions using the [issue](https://github.com/intel/ros2_openvino_toolkit/issues) tracker.
# More Information
-* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
###### *Any security issue should be reported using process at https://01.org/security*
+
diff --git a/data/labels/object_detection/coco.names b/data/labels/object_detection/coco.names
new file mode 100755
index 00000000..16315f2b
--- /dev/null
+++ b/data/labels/object_detection/coco.names
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorbike
+aeroplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+sofa
+pottedplant
+bed
+diningtable
+toilet
+tvmonitor
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
\ No newline at end of file
diff --git a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
index 23d4cd9a..827dc158 100644
--- a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
+++ b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
@@ -1,2 +1,3 @@
+background
vehicle
license
diff --git a/data/labels/object_segmentation/frozen_inference_graph.labels b/data/labels/object_segmentation/frozen_inference_graph.labels
index b4427edc..744de27d 100644
--- a/data/labels/object_segmentation/frozen_inference_graph.labels
+++ b/data/labels/object_segmentation/frozen_inference_graph.labels
@@ -1,3 +1,4 @@
+_background
person
bicycle
car
@@ -87,4 +88,4 @@ vase
scissors
teddy_bear
hair_drier
-toothbrush
\ No newline at end of file
+toothbrush
diff --git a/data/model_list/convert_model.lst b/data/model_list/convert_model.lst
new file mode 100644
index 00000000..0cfc7f5b
--- /dev/null
+++ b/data/model_list/convert_model.lst
@@ -0,0 +1,5 @@
+# This file can be used with the --list option of the model converter.
+mobilenet-ssd
+deeplabv3
+mask_rcnn_inception_resnet_v2_atrous_coco
+
diff --git a/data/model_list/download_model.lst b/data/model_list/download_model.lst
new file mode 100644
index 00000000..0744a846
--- /dev/null
+++ b/data/model_list/download_model.lst
@@ -0,0 +1,18 @@
+# This file can be used with the --list option of the model downloader.
+face-detection-adas-0001
+age-gender-recognition-retail-0013
+emotions-recognition-retail-0003
+landmarks-regression-retail-0009
+license-plate-recognition-barrier-0001
+person-detection-retail-0013
+person-attributes-recognition-crossroad-0230
+person-reidentification-retail-0277
+vehicle-attributes-recognition-barrier-0039
+vehicle-license-plate-detection-barrier-0106
+head-pose-estimation-adas-0001
+human-pose-estimation-0001
+semantic-segmentation-adas-0001
+mobilenet-ssd
+deeplabv3
+mask_rcnn_inception_resnet_v2_atrous_coco
+
diff --git a/doc/design/Pipeline_service.png b/doc/design/Pipeline_service.png
new file mode 100644
index 00000000..b5907d9d
Binary files /dev/null and b/doc/design/Pipeline_service.png differ
diff --git a/doc/design/arch_design-configurable_pipeline_management.PNG b/doc/design/arch_design-configurable_pipeline_management.PNG
new file mode 100644
index 00000000..2f076d68
Binary files /dev/null and b/doc/design/arch_design-configurable_pipeline_management.PNG differ
diff --git a/doc/design/arch_design-decoupling.PNG b/doc/design/arch_design-decoupling.PNG
new file mode 100644
index 00000000..9e31e9c5
Binary files /dev/null and b/doc/design/arch_design-decoupling.PNG differ
diff --git a/doc/design/arch_design-hierarchical_components.PNG b/doc/design/arch_design-hierarchical_components.PNG
new file mode 100644
index 00000000..f36cda9c
Binary files /dev/null and b/doc/design/arch_design-hierarchical_components.PNG differ
diff --git a/doc/design/arch_design-pipeline_composition.PNG b/doc/design/arch_design-pipeline_composition.PNG
new file mode 100644
index 00000000..2cfc7ae1
Binary files /dev/null and b/doc/design/arch_design-pipeline_composition.PNG differ
diff --git a/doc/design/config_example-result_filtering_for_vehicle_analytics.png b/doc/design/config_example-result_filtering_for_vehicle_analytics.png
new file mode 100644
index 00000000..a08fa33d
Binary files /dev/null and b/doc/design/config_example-result_filtering_for_vehicle_analytics.png differ
diff --git a/doc/design/config_example-vehicle_analytics.png b/doc/design/config_example-vehicle_analytics.png
new file mode 100644
index 00000000..1b89aa44
Binary files /dev/null and b/doc/design/config_example-vehicle_analytics.png differ
diff --git a/doc/design/data_filtering_for_inference_results.png b/doc/design/data_filtering_for_inference_results.png
new file mode 100644
index 00000000..67e0bc56
Binary files /dev/null and b/doc/design/data_filtering_for_inference_results.png differ
diff --git a/doc/design/filtering_example-vehicle_analytics.png b/doc/design/filtering_example-vehicle_analytics.png
new file mode 100644
index 00000000..a73bcc89
Binary files /dev/null and b/doc/design/filtering_example-vehicle_analytics.png differ
diff --git a/doc/design/inference_example-vehicle_analytics_pipeline.png b/doc/design/inference_example-vehicle_analytics_pipeline.png
new file mode 100644
index 00000000..ea2decc1
Binary files /dev/null and b/doc/design/inference_example-vehicle_analytics_pipeline.png differ
diff --git a/doc/design/inference_examples.png b/doc/design/inference_examples.png
new file mode 100644
index 00000000..6e8a856b
Binary files /dev/null and b/doc/design/inference_examples.png differ
diff --git a/doc/inferences/Face_Detection.md b/doc/inferences/Face_Detection.md
deleted file mode 100644
index 3bd2c8fa..00000000
--- a/doc/inferences/Face_Detection.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Face Detection
-
-## Demo Result Snapshots
-See below pictures for the demo result snapshots.
-* face detection input from image
-
-## Download Models
-* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash
- cd $model_downloader
- sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output
- sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output
- sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output
- sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output
- ```
-* copy label files (excute _once_)
- ```bash
- sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/
- sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/
- sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/
- ```
diff --git a/doc/inferences/Face_Reidentification.md b/doc/inferences/Face_Reidentification.md
deleted file mode 100644
index 9a496fff..00000000
--- a/doc/inferences/Face_Reidentification.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Face Reidentification
-## Download Models
-* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash
- cd $model_downloader
- sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output
- sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir /opt/openvino_toolkit/models/face-reidentification/output
- ```
-
-
diff --git a/doc/inferences/Object_Detection.md b/doc/inferences/Object_Detection.md
deleted file mode 100644
index 905b134d..00000000
--- a/doc/inferences/Object_Detection.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# Object Detection
-## Introduction
-The section depict the kind of Object Detection, which produces object classification and its location based ROI.
-Two kinds of models are supported currently:
-- SSD based Object Detection Models
- * SSD300-VGG16, SSD500-VGG16, Mobilenet-SSD (both caffe and tensorflow)
-- YoloV2
-
-## Demo Result Snapshots
-* object detection input from realsense camera
-
-
-
-## Download Models
->> Before using the supported models, you need to first download and optimize them into the OpenVINO model format (IR). The mobilenet-SSD caffe model is the default one used in the Object Detection configuration.
-
-#### mobilenet-ssd
-* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model
- ```bash
- cd $model_downloader
- sudo python3 ./downloader.py --name mobilenet-ssd
- #FP32 precision model
- sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5]
- #FP16 precision model
- sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5]
- ```
-* copy label files (execute _once_)
- ```bash
- sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32
- sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16
- ```
-#### YOLOv2-voc
-* Darkflow to protobuf(.pb)
- - install [darkflow](https://github.com/thtrieu/darkflow)
- - install prerequisites
- ```bash
- pip3 install tensorflow opencv-python numpy networkx cython
- ```
- - Get darkflow and YOLO-OpenVINO
- ```bash
- mkdir -p ~/code && cd ~/code
- git clone https://github.com/thtrieu/darkflow
- git clone https://github.com/chaoli2/YOLO-OpenVINO
- sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/
- ```
- - modify the line `self.offset = 16` in the ./darkflow/utils/loader.py file and replace it with `self.offset = 20`
- - Install darkflow
- ```bash
- cd ~/code/darkflow
- pip3 install .
- ```
- - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow.
- ```bash
- cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt
- ```
- - Get yolov2 weights and cfg
- ```bash
- cd ~/code/darkflow
- mkdir -p models
- cd models
- wget -c https://pjreddie.com/media/files/yolov2-voc.weights
- wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg
- ```
- - Run convert script
- ```bash
- cd ~/code/darkflow
- flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb
- ```
-* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model
- ```bash
- cd ~/code/darkflow
- # FP32 precision model
- sudo python3 $model_optimizer/mo_tf.py \
- --input_model built_graph/yolov2-voc.pb \
- --batch 1 \
- --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \
- --data_type FP32 \
- --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32
- # FP16 precision model
- sudo python3 $model_optimizer/mo_tf.py \
- --input_model built_graph/yolov2-voc.pb \
- --batch 1 \
- --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \
- --data_type FP16 \
- --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16
- ```
-* copy label files (execute _once_)
- ```bash
- sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32
- sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16
- ```
diff --git a/doc/inferences/Object_Segmentation.md b/doc/inferences/Object_Segmentation.md
deleted file mode 100644
index 7e998af9..00000000
--- a/doc/inferences/Object_Segmentation.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Object Segmentation
-## Demo Result Snapshots
-See below pictures for the demo result snapshots.
-* object segmentation input from video
-
-## Download Models
-* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model
- ```bash
- #object segmentation model
- mkdir -p ~/Downloads/models
- cd ~/Downloads/models
- wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
- tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
- cd mask_rcnn_inception_v2_coco_2018_01_28
- #FP32
- sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir /opt/openvino_toolkit/models/segmentation/output/FP32
- #FP16
- sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16
- ```
-* copy label files (execute _once_)
- ```bash
- sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32
- sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16
- ```
diff --git a/doc/inferences/People_Reidentification.md b/doc/inferences/People_Reidentification.md
deleted file mode 100644
index 39c276d6..00000000
--- a/doc/inferences/People_Reidentification.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# People Reidentification
-## Demo Result Snapshots
-See below pictures for the demo result snapshots.
-* Person Reidentification input from standard camera
-
-## Download Models
-* download the optimized Intermediate Representation (IR) of the model (execute _once_)
- ```bash
- cd $model_downloader
- sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output
- sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output
- ```
-
diff --git a/doc/inferences/Vehicle_Detection.md b/doc/inferences/Vehicle_Detection.md
deleted file mode 100644
index 8fdb1a5b..00000000
--- a/doc/inferences/Vehicle_Detection.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Vehicle Detection
-## Download Models
-### OpenSource Version
-* download the optimized Intermediate Representation (IR) of the model (execute _once_)
- ```bash
- cd $model_downloader
- sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output
- sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output
- sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output
- ```
-* copy label files (execute _once_)
- ```bash
- sudo cp $openvino_labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output
- ```
diff --git a/doc/installation/BINARY_INSTALLATION.md b/doc/installation/BINARY_INSTALLATION.md
deleted file mode 100644
index ebe1cf71..00000000
--- a/doc/installation/BINARY_INSTALLATION.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# ros2_openvino_toolkit
-## 1. Prerequisite
-- An x86_64 computer running Ubuntu 18.04. Below processors are supported:
- * 6th-8th Generation Intel® Core™
- * Intel® Xeon® v5 family
- * Intel® Xeon® v6 family
-- ROS2 [Dashing](https://github.com/ros2/ros2/wiki)
-- [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit)
-- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File
-- Graphics are required only if you use a GPU. The official system requirements for GPU are:
- * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics
- * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics)
- * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics
-
-- Use one of the following methods to determine the GPU on your hardware:
- * [lspci] command: GPU info may be found in the [VGA compatible controller] line.
- * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information.
- * Openvino: Download the install package; the bundled install_GUI.sh will check the GPU information before installation.
-
-## 2. Environment Setup
-**Note**: You can choose to build the environment using the *./environment_setup_binary.sh* script in the script subfolder. The *modules.conf* file in the same directory as the .sh file controls the installation process; modify *modules.conf* to customize your installation.
-```bash
-./environment_setup_binary.sh
-```
-**Note**:You can also choose to follow the steps below to build the environment step by step.
-* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install [OpenVINO™ Toolkit 2019R3.1](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) ([download](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux))
- **Note**: Please use *root privileges* to run the installer when installing the core components.
-* Install [the Intel® Graphics Compute Runtime for OpenCL™ driver components required to use the GPU plugin](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps)
-
-- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
-
-## 3. Building and Installation
-* Build sample code under openvino toolkit
- ```bash
- # root is required instead of sudo
- source /opt/intel/openvino/bin/setupvars.sh
- cd /opt/intel/openvino/deployment_tools/inference_engine/samples/
- mkdir build
- cd build
- cmake ..
- make
- ```
-* set ENV CPU_EXTENSION_LIB and GFLAGS_LIB
- ```bash
- export CPU_EXTENSION_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so
- export GFLAGS_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a
- ```
-* Install ROS2_OpenVINO packages
- ```bash
- mkdir -p ~/ros2_overlay_ws/src
- cd ~/ros2_overlay_ws/src
- git clone https://github.com/intel/ros2_openvino_toolkit
- git clone https://github.com/intel/ros2_object_msgs
- git clone https://github.com/ros-perception/vision_opencv -b ros2
- git clone https://github.com/ros2/message_filters.git
- git clone https://github.com/ros-perception/image_common.git -b dashing
- git clone https://github.com/intel/ros2_intel_realsense.git -b refactor
- ```
-
-* Build package
- ```
- source ~/ros2_ws/install/local_setup.bash
- source /opt/intel/openvino/bin/setupvars.sh
- cd ~/ros2_overlay_ws
- colcon build --symlink-install
- source ./install/local_setup.bash
- sudo mkdir -p /opt/openvino_toolkit
- sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/
- ```
-
-
-
diff --git a/doc/installation/OPEN_SOURCE_INSTALLATION.md b/doc/installation/OPEN_SOURCE_INSTALLATION.md
deleted file mode 100644
index cba2ce0c..00000000
--- a/doc/installation/OPEN_SOURCE_INSTALLATION.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# ros2_openvino_toolkit
-
-## 1. Prerequisite
-- An x86_64 computer running Ubuntu 18.04. Below processors are supported:
- * 6th-8th Generation Intel® Core™
- * Intel® Xeon® v5 family
- * Intel® Xeon® v6 family
-- ROS2 [Dashing](https://github.com/ros2/ros2/wiki)
-
-- OpenVINO™ Toolkit Open Source
- * The [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino) that helps to enable fast, heterogeneous deep learning inferencing for Intel® processors (CPU and GPU/Intel® Processor Graphics), and supports more than 100 public and custom models.
- * [Open Model Zoo](https://github.com/opencv/open_model_zoo) includes 20+ pre-trained deep learning models to expedite development and improve deep learning inference on Intel® processors (CPU, Intel Processor Graphics, FPGA, VPU), along with many samples to easily get started.
-
-- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File
-- Graphics are required only if you use a GPU. The official system requirements for GPU are:
- * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics
- * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics)
- * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics
-
-- Use one of the following methods to determine the GPU on your hardware:
- * [lspci] command: GPU info may be found in the [VGA compatible controller] line.
- * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information.
- * Openvino: Download the install package; the bundled install_GUI.sh will check the GPU information before installation.
-
-## 2. Environment Setup
-**Note**: You can choose to build the environment using the *./environment_setup.sh* script in the script subfolder. The *modules.conf* file in the same directory as the .sh file controls the installation process; modify *modules.conf* to customize your installation.
-```bash
-./environment_setup.sh
-```
-**Note**:You can also choose to follow the steps below to build the environment step by step.
-* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install OpenVINO™ Toolkit Open Source
- * Install OpenCL Driver for GPU
- ```bash
- cd ~/Downloads
- wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-gmmlib_18.4.1_amd64.deb
- wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-core_18.50.1270_amd64.deb
- wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-opencl_18.50.1270_amd64.deb
- wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-opencl_19.04.12237_amd64.deb
- wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-ocloc_19.04.12237_amd64.deb
- sudo dpkg -i *.deb
- ```
- * Install [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino)([tag 2019_R3.1](https://github.com/openvinotoolkit/openvino/blob/2019_R3.1/inference-engine/README.md))
- * Install [Open Model Zoo](https://github.com/opencv/open_model_zoo)([tag 2019_R3.1](https://github.com/opencv/open_model_zoo/blob/2019_R3.1/demos/README.md))
-
-- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
-
-## 3. Building and Installation
-
-* set ENV InferenceEngine_DIR, CPU_EXTENSION_LIB and GFLAGS_LIB
- ```bash
- export InferenceEngine_DIR=/opt/openvino_toolkit/dldt/inference-engine/build/
- export CPU_EXTENSION_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libcpu_extension.so
- export GFLAGS_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libgflags_nothreads.a
- ```
-* Install ROS2_OpenVINO packages
- ```bash
- mkdir -p ~/ros2_overlay_ws/src
- cd ~/ros2_overlay_ws/src
- git clone https://github.com/intel/ros2_openvino_toolkit
- git clone https://github.com/intel/ros2_object_msgs
- git clone https://github.com/ros-perception/vision_opencv -b ros2
- git clone https://github.com/ros2/message_filters.git
- git clone https://github.com/ros-perception/image_common.git -b dashing
- git clone https://github.com/intel/ros2_intel_realsense.git -b refactor
- ```
-
-* Build package
- ```
- source ~/ros2_ws/install/local_setup.bash
- cd ~/ros2_overlay_ws
- colcon build --symlink-install
- source ./install/local_setup.bash
- sudo mkdir -p /opt/openvino_toolkit
- sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/
- ```
-
-
-
-
-
diff --git a/doc/installation/installation.md b/doc/installation/installation.md
deleted file mode 100644
index 6596a35a..00000000
--- a/doc/installation/installation.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-# Installation
->> Intel releases two different series of the OpenVINO Toolkit, referred to as the [OpenSource Version](https://github.com/openvinotoolkit/openvino/) and the [Binary Version](https://software.intel.com/en-us/openvino-toolkit). You may choose either of them to install.
-
-**NOTE:** If you are not sure which version you would use, it is recommended for you to choose [Binary Version](https://software.intel.com/en-us/openvino-toolkit), which can simplify your environment setup.
-
-## OpenSource Version
-One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/OPEN_SOURCE_INSTALLATION.md) for details.
-
-## Binary Version
-One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/BINARY_INSTALLATION.md) for details.
diff --git a/doc/launching/launch.md b/doc/launching/launch.md
deleted file mode 100644
index efc1d1ae..00000000
--- a/doc/launching/launch.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Launching
-## 1. Setup Environment
-Please refer to this [guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/set_environment.md) for details.
-
-**NOTE:** Configure the Neural Compute Stick USB driver *once* by following the instructions below, in case you have an NCS or NCS2 in hand.
- ```bash
- cd ~/Downloads
- cat <<EOF > 97-usbboot.rules
- SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
- SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
- SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
- EOF
- sudo cp 97-usbboot.rules /etc/udev/rules.d/
- sudo udevadm control --reload-rules
- sudo udevadm trigger
- sudo ldconfig
- rm 97-usbboot.rules
- ```
-## 2. Launch Program
-### Topic
-Each inference listed in [section Inference Implementations](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations) has a default launch configuration (xxx.launch.py) in the OpenVINO Sample package. You can use the standard ROS2 launch mechanism to launch them. For example:
- ```bash
- ros2 launch dynamic_vino_sample pipeline_object.launch.py
- ```
-
-The full list of xxx.launch.py files is shown in the table below:
-
-|Download Models|Launch File|Description|
-|---|---|---|
-|[Object Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md)|pipeline_object.launch.py|Launching file for **Object Detection**, by default mobilenet_ssd model and standard USB camera are used.|
-|[Face Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md)|pipeline_people.launch.py|Launching file for **Face Detection**, also including **Age/Gender Recognition, HeadPose Estimation, and Emotion Recognition**.|
-|[Object Segmentation](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Segmentation.md)|pipeline_segmentation.launch.py|Launching file for **Object Segmentation**.|
-|[Person Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/People_Reidentification.md)|pipeline_person_reid.launch.py|Launching file for **Person Re-Identification**.|
-|[Face Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Reidentification.md)|pipeline_face_reid.launch.py|Launching file for **Face Re-Identification**, in which **Face Landmark Detection** is included.|
-|[Vehicle Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Vehicle_Detection.md)|pipeline_vehicle_detection.launch.py|Launching file for **vehicle detection**, in which **license plate recognition** is included.|
-
-### Service
-See [service Page](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/service.md) for detailed launching instructions.
diff --git a/doc/launching/service.md b/doc/launching/service.md
deleted file mode 100644
index c5f5701f..00000000
--- a/doc/launching/service.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Service
-## Download Models
-### Object Detection Service
-* See [object detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md#mobilenet-ssd) section for detailed instructions.
-
-### People Detection Service
-* See [People Detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md#opensource-version) section for detailed instructions.
-
-## Launching
-* run object detection service sample code input from Image
- Run image processing service:
- ```bash
- ros2 launch dynamic_vino_sample image_object_server.launch.py
- ```
- Run example application with an absolute path of an image on another console:
- ```bash
- ros2 run dynamic_vino_sample image_object_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/car.png
- ```
-* run face detection service sample code input from Image
- Run image processing service:
- ```bash
- ros2 launch dynamic_vino_sample image_people_server.launch.py
- ```
- Run example application with an absolute path of an image on another console:
- ```bash
- ros2 run dynamic_vino_sample image_people_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg
- ```
diff --git a/doc/launching/set_environment.md b/doc/launching/set_environment.md
deleted file mode 100644
index d50006a3..00000000
--- a/doc/launching/set_environment.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Set Environment
-## OpenSource Version
-* Set ENV LD_LIBRARY_PATH and openvino_version
- ```bash
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib
- export openvino_version=opensource
- ```
-* Install prerequisites
- ```bash
- cd /opt/openvino_toolkit/dldt/model-optimizer/install_prerequisites
- sudo ./install_prerequisites.sh
- ```
-* Set model tool variable
- ```bash
- source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh
- ```
-## Binary Version
-* Set ENV LD_LIBRARY_PATH and openvino_version
- ```bash
- source /opt/intel/openvino/bin/setupvars.sh
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib
- export openvino_version=binary
- ```
-* Install prerequisites
- ```bash
- cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites
- sudo ./install_prerequisites.sh
- ```
-* Set model tool variable
- ```bash
- source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh
- ```
diff --git a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md
deleted file mode 100644
index 0f43cc9f..00000000
--- a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# ROS2_FOXY_OpenVINO_Toolkit
-
-**NOTE:**
-Below steps have been tested on **Ubuntu 20.04**.
-
-## 1. Environment Setup
-* Install ROS2 Foxy ([guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html))
-* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html))
-* Install Intel® RealSense™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md))
-
-## 2. Building and Installation
-* Install ROS2_OpenVINO packages
-```
-mkdir -p ~/catkin_ws/src
-cd ~/catkin_ws/src
-git clone https://github.com/intel/ros2_openvino_toolkit -b foxy_dev
-git clone https://github.com/intel/ros2_object_msgs
-git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2
-git clone https://github.com/ros-perception/vision_opencv.git -b ros2
-```
-* Install dependencies
-```
-sudo apt-get install ros-foxy-diagnostic-updater
-```
-* Build package
-```
-source /opt/ros/foxy/setup.bash
-source /opt/intel/openvino_2021/bin/setupvars.sh
-cd ~/catkin_ws
-colcon build --symlink-install
-source ./install/local_setup.bash
-```
-
-## 3. Running the Demo
-* Preparation
-```
-source /opt/intel/openvino_2021/bin/setupvars.sh
-sudo mkdir -p /opt/openvino_toolkit
-sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models
-sudo chmod 777 -R /opt/openvino_toolkit/models
-```
-
-* See all available models
-```
-cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader
-sudo python3 downloader.py --print_all
-```
-
-* Download the optimized Intermediate Representation (IR) of model (execute once), for example:
-```
-cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader
-sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output
-sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output
-sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output
-sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output
-sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output
-sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output
-sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output
-sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output
-sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output
-sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output
-sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output
-sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output
-```
-
-* copy label files (execute once)
-* Before launching, copy the label files to the same model path and make sure the model path and label path match ros_openvino_toolkit/vino_launch/param/xxxx.yaml.
-```
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32
-```
-
-* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (for example, the model for object detection):
-* (Note: Tensorflow=1.15.5, Python<=3.7)
- * ssd_mobilenet_v2_coco
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py
- ```
- * deeplabv3
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name deeplabv3
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py
- ```
- * YOLOV2
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name yolo-v2-tf
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py
- ```
-
-* Before launching, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml and make sure parameters such as model path, label path and inputs are set correctly.
- * run face detection sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_people.launch.py
- ```
- * run person reidentification sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py
- ```
- * run person face reidentification sample code input from RealSenseCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py
- ```
- * run face detection sample code input from Image.
- ```
- ros2 launch dynamic_vino_sample pipeline_image.launch.py
- ```
- * run object segmentation sample code input from RealSenseCameraTopic.
- ```
- ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py
- ```
- * run object segmentation sample code input from Image.
- ```
- ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py
- ```
- * run vehicle detection sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py
- ```
- * run person attributes sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py
- ```
-
-# More Information
-* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
-
-###### *Any security issue should be reported using process at https://01.org/security*
-
diff --git a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md
deleted file mode 100644
index a5125268..00000000
--- a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md
+++ /dev/null
@@ -1,156 +0,0 @@
-# ROS2_GALACTIC_OpenVINO_Toolkit
-
-**NOTE:**
-Below steps have been tested on **Ubuntu 20.04**.
-
-## 1. Environment Setup
-* Install ROS2 Galactic ([guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html))
-* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) or building by source code ([guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingForLinux))
-
- * version **intel-openvino-dev-ubuntu20-2021.4.752** was tested. It is recommended to use 2021.4.752 or newer.
-* Install Intel® RealSense™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md))
-
-## 2. Building and Installation
-* Install ROS2_OpenVINO_Toolkit packages
-```
-mkdir -p ~/catkin_ws/src
-cd ~/catkin_ws/src
-git clone https://github.com/intel/ros2_openvino_toolkit -b galactic
-git clone https://github.com/intel/ros2_object_msgs
-git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2
-git clone https://github.com/ros-perception/vision_opencv.git -b ros2
-```
-* Install dependencies
-```
-sudo apt-get install ros-galactic-diagnostic-updater
-sudo pip3 install networkx
-sudo apt-get install python3-defusedxml
-sudo pip3 install tensorflow==2.4.1
-```
-* Build package
-```
-source /opt/ros/galactic/setup.bash
-source /opt/intel/openvino_2021/bin/setupvars.sh
-cd ~/catkin_ws
-colcon build --symlink-install
-source ./install/local_setup.bash
-```
-
-## 3. Running the Demo
-* Preparation
-```
-source /opt/intel/openvino_2021/bin/setupvars.sh
-sudo mkdir -p /opt/openvino_toolkit
-sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models
-sudo chmod 777 -R /opt/openvino_toolkit/models
-```
-
-* See all available models
-```
-cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader
-sudo python3 downloader.py --print_all
-```
-
-* Download the optimized Intermediate Representation (IR) of model (execute once), for example:
-```
-cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader
-sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output
-sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output
-sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output
-sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output
-sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output
-sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output
-sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output
-sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output
-sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output
-sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output
-sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output
-sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output
-```
-
-* copy label files (execute once)
-* Before launching, copy the label files to the same model path and make sure the model path and label path match ros_openvino_toolkit/vino_launch/param/xxxx.yaml.
-```
- # Labels for Face-Detection
- sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/
- sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/
-
- # Labels for Emotions-Recognition
- sudo mkdir -p /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/
-
- # Labels for Semantic-Segmentation
- sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/
- sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/
-
- # Labels for Vehicle-License_Plate
- sudo mkdir -p /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32
-```
-
-* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (for example, the model for object detection):
-* (Note: Tensorflow=2.4.1, Python<=3.7)
- * ssd_mobilenet_v2_coco
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py
- ```
- * deeplabv3
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name deeplabv3
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py -d /opt/openvino_toolkit/models/
- ```
- * YOLOV2
- ```
- cd /opt/openvino_toolkit/models/
- sudo python3 downloader/downloader.py --name yolo-v2-tf
- sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py
- ```
-
-* Before launching, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml and make sure parameters such as model path, label path and inputs are set correctly.
- * run face detection sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_people.launch.py
- ```
- * run person reidentification sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py
- ```
- * run person face reidentification sample code input from RealSenseCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py
- ```
- * run face detection sample code input from Image.
- ```
- ros2 launch dynamic_vino_sample pipeline_image.launch.py
- ```
- * run object segmentation sample code input from RealSenseCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py
- ```
- * run object segmentation sample code input from Image.
- ```
- sudo mkdir -p /opt/openvino_toolkit/ros2_openvino_toolkit/data/images
- sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/images/expressway.jpg /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/
- ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py
- ```
- * run vehicle detection sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py
- ```
- * run person attributes sample code input from StandardCamera.
- ```
- ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py
- ```
-
-# More Information
-* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
-
-###### *Any security issue should be reported using process at https://01.org/security*
-
diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md
new file mode 100644
index 00000000..45f79670
--- /dev/null
+++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md
@@ -0,0 +1,135 @@
+# ROS2_OpenVINO_Toolkit
+
+**NOTE:**
+Below steps have been tested on **Ubuntu 20.04** and **Ubuntu 22.04**.
+Supported ROS2 versions include foxy, galactic and humble.
+
+## 1. Environment Setup
+For ROS2 foxy and galactic on Ubuntu 20.04:
+ * Install ROS2.
+ Refer to: [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html)
+
+ * Install Intel® OpenVINO™ Toolkit Version: 2022.3.
+ Refer to: [OpenVINO_install_guide](https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html#doxid-openvino-docs-install-guides-installing-openvino-apt)
+  * Install from an archive file. Both the runtime and the development tool are needed; `pip` is recommended for installing the development tool (see the sketch at the end of this section).
+ Refer to: [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html)
+
+ * Install Intel® RealSense™ SDK.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)
+
+For ROS2 humble on Ubuntu 22.04:
+ * Install ROS2.
+ Refer to: [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html)
+
+ * Install the latest Intel® OpenVINO™ Toolkit version from source.
+ Refer to: [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode)
+
+ * Install Intel® RealSense™ SDK from source.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)
+
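+A minimal sketch of installing the development tool with `pip`, assuming a Python 3 virtual environment and the 2022.3 release used in this guide:
+```
+python3 -m venv openvino_env               # create a virtual environment
+source openvino_env/bin/activate           # activate it
+python -m pip install --upgrade pip        # upgrade pip first
+pip install openvino-dev==2022.3.0         # development tool (pulls in the runtime)
+```
+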
+## 2. Building and Installation
+* Install ROS2_OpenVINO_Toolkit packages
+```
+mkdir -p ~/catkin_ws/src
+cd ~/catkin_ws/src
+git clone https://github.com/intel/ros2_openvino_toolkit -b ros2
+git clone https://github.com/intel/ros2_object_msgs
+git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2-development
+git clone https://github.com/ros-perception/vision_opencv.git -b <ROS2_VERSION>
+```
+* Install dependencies
+```
+sudo apt-get install ros-<ROS2_VERSION>-diagnostic-updater
+sudo apt install python3-colcon-common-extensions
+```
+* Build package
+```
+source /opt/ros/<ROS2_VERSION>/setup.bash
+source <OpenVINO_INSTALL_DIR>/setupvars.sh
+cd ~/catkin_ws
+colcon build --symlink-install
+source ./install/local_setup.bash
+```
+
+## 3. Running the Demo
+### Install OpenVINO 2022.3 by PIP
+OMZ tools are provided for downloading and converting open_model_zoo models in OpenVINO 2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/)
+
+* See all available models
+```
+omz_downloader --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of model (execute once), for example:
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (such as the model for object detection):
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
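+
+For reference, a model list file is plain text with one Open Model Zoo model name per line and `#` starting a comment. A hypothetical minimal `download_model.lst` could look like:
+```
+# illustrative subset of the models used by the samples
+face-detection-adas-0001
+age-gender-recognition-retail-0013
+emotions-recognition-retail-0003
+```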
+### Install OpenVINO 2022.3 by source code
+* See all available models
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 downloader.py --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of models (execute once), for example:
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 downloader.py --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (such as the model for object detection):
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 converter.py --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
+
+* Copy label files (execute once)
+**Note**: If you skipped the download/convert steps above that create the output dirs, make the label directories first; see the sketch after this code block.
+```
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+```
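+
+If the label directories do not exist yet, a sketch of creating them first (paths assumed from the copy commands above):
+```
+sudo mkdir -p /opt/openvino_toolkit/models/intel/face-detection-adas-0001/{FP32,FP16}
+sudo mkdir -p /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32
+sudo mkdir -p /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/{FP32,FP16}
+sudo mkdir -p /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+```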
+
+* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before launching, and make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start [yaml configuration guide](./yaml_configuration_guide.md) for detailed configuration guidance.
+ * run face detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_people.launch.py
+ ```
+ * run person reidentification sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_reidentification.launch.py
+ ```
+ * run face detection sample code input from Image.
+ ```
+ ros2 launch openvino_node pipeline_image.launch.py
+ ```
+ * run object segmentation sample code input from RealSenseCameraTopic.
+ ```
+ ros2 launch openvino_node pipeline_segmentation.launch.py
+ ```
+ * run vehicle detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_vehicle_detection.launch.py
+ ```
+ * run person attributes sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_person_attributes.launch.py
+ ```
+
+# More Information
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+
+###### *Any security issue should be reported using process at https://01.org/security*
+
diff --git a/doc/quick_start/tutorial_for_yolov5_converted.md b/doc/quick_start/tutorial_for_yolov5_converted.md
new file mode 100644
index 00000000..dfc82ed8
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov5_converted.md
@@ -0,0 +1,99 @@
+# Tutorial_For_yolov5_Converted
+
+# Introduction
+This document describes a method to convert YOLOv5 nano PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv5n for deployment in practical applications.
+
+## Reference Phrase
+|Term|Description|
+|---|---|
+|OpenVINO|Open Visual Inference & Neural Network Optimization|
+|ONNX|Open Neural Network Exchange|
+|YOLO|You Only Look Once|
+|IR|Intermediate Representation|
+
+## Reference Document
+|Doc|Link|
+|---|---|
+|OpenVINO|[openvino_2_0_transition_guide](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html)|
+|YOLOv5|[yolov5](https://github.com/ultralytics/yolov5)|
+
+# Convert Weight File to ONNX
+* Copy YOLOv5 Repository from GitHub
+```
+git clone https://github.com/ultralytics/yolov5.git
+```
+
+* Set Environment for Installing YOLOv5
+```
+cd yolov5
+python3 -m venv yolo_env            # create a virtual Python environment
+source yolo_env/bin/activate        # activate the environment
+pip install -r requirements.txt     # install yolov5 prerequisites
+pip install onnx                    # install ONNX
+```
+
+* Download PyTorch Weights
+```
+mkdir model_convert && cd model_convert
+wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt
+```
+
+* Convert PyTorch weights to ONNX weights
+The YOLOv5 repository provides an export.py script, which can be used to convert the PyTorch weights to ONNX weights.
+```
+cd ..
+python3 export.py --weights model_convert/yolov5n.pt --include onnx
+```
+
+# Convert ONNX files to IR files
+After obtaining the ONNX weight file from the previous section [Convert Weight File to ONNX](#convert-weight-file-to-onnx), we can use the model optimizer to convert it to an IR file.
+
+* Install the OpenVINO Model Optimizer Environment
+To use the model optimizer, you need to run the following commands to install some necessary components (if you are still in the yolo_env virtual environment, run the **deactivate** command to exit it or start a new terminal).
+```
+python3 -m venv ov_env                    # create an OpenVINO virtual environment
+source ov_env/bin/activate                # activate the environment
+python -m pip install --upgrade pip       # upgrade pip
+pip install openvino[onnx]==2022.3.0      # install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0  # install OpenVINO Dev Tool for ONNX
+```
+
+* Generate IR file
+```
+cd model_convert
+mo --input_model yolov5n.onnx
+```
+Then we will get three files: yolov5n.xml, yolov5n.bin, and yolov5n.mapping under the model_convert folder.
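+
+As a quick sanity check that the IR loads, a sketch using `benchmark_app` (installed above with the openvino-dev package):
+```
+benchmark_app -m yolov5n.xml -d CPU -t 10   # 10-second CPU smoke test of the converted model
+```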
+
+# Move to the Recommended Model Path
+```
+cd ~/yolov5/model_convert
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+sudo cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+```
+
+# yolov5 optimize to yolov5-int8
+To optimize yolov5 to yolov5-int8, refer to:
+https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/111-yolov5-quantization-migration
+
+The installation guide:
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
+# FAQ
+
+
+
+How to install the python3-venv package?
+
+On Debian/Ubuntu systems, you need to install the python3-venv package using the following commands.
+```
+apt-get update
+apt-get install python3-venv
+```
+You may need to use sudo with that command. After installing, recreate your virtual environment.
+
+
diff --git a/doc/quick_start/tutorial_for_yolov7_converted.md b/doc/quick_start/tutorial_for_yolov7_converted.md
new file mode 100644
index 00000000..9c476634
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov7_converted.md
@@ -0,0 +1,103 @@
+# Tutorial_For_yolov7_Converted
+
+# Introduction
+This document describes a method to convert YOLOv7 PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv7 for deployment in practical applications.
+
+## Reference Phrase
+|Term|Description|
+|---|---|
+|OpenVINO|Open Visual Inference & Neural Network Optimization|
+|ONNX|Open Neural Network Exchange|
+|YOLO|You Only Look Once|
+|IR|Intermediate Representation|
+
+## Reference Document
+|Doc|Link|
+|---|---|
+|OpenVINO|[openvino_2_0_transition_guide](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html)|
+|YOLOv7|[yolov7](https://github.com/WongKinYiu/yolov7)|
+
+# Convert Weight File to ONNX
+* Copy YOLOv7 Repository from GitHub
+```
+git clone https://github.com/WongKinYiu/yolov7.git
+```
+
+* Set Environment for Installing YOLOv7
+```
+cd yolov7
+python3 -m venv yolo_env            # create a virtual Python environment
+source yolo_env/bin/activate        # activate the environment
+pip install -r requirements.txt     # install yolov7 prerequisites
+pip install onnx                    # install ONNX
+pip install nvidia-pyindex          # add the NVIDIA PIP index
+pip install onnx-graphsurgeon       # install GraphSurgeon
+```
+
+* Download PyTorch Weights
+```
+mkdir model_convert && cd model_convert
+wget "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt"
+```
+
+* Convert PyTorch weights to ONNX weights
+The YOLOv7 repository provides an export.py script, which can be used to convert the PyTorch weights to ONNX weights.
+```
+cd ..
+python3 export.py --weights model_convert/yolov7.pt
+```
+
+# Convert ONNX files to IR files
+After obtaining the ONNX weight file from the previous section [Convert Weight File to ONNX](#convert-weight-file-to-onnx), we can use the model optimizer to convert it to an IR file.
+
+* Install the OpenVINO Model Optimizer Environment
+To use the model optimizer, you need to run the following commands to install some necessary components (if you are still in the yolo_env virtual environment, run the **deactivate** command to exit it or start a new terminal).
+```
+python3 -m venv ov_env                    # create an OpenVINO virtual environment
+source ov_env/bin/activate                # activate the environment
+python -m pip install --upgrade pip       # upgrade pip
+pip install openvino[onnx]==2022.3.0      # install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0  # install OpenVINO Dev Tool for ONNX
+```
+
+* Generate IR file
+```
+cd model_convert
+mo --input_model yolov7.onnx
+```
+Then we will get three files: yolov7.xml, yolov7.bin, and yolov7.mapping under the model_convert folder.
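+
+As with yolov5, the IR can be smoke-tested with `benchmark_app` from the openvino-dev package installed above:
+```
+benchmark_app -m yolov7.xml -d CPU -t 10   # 10-second CPU smoke test of the converted model
+```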
+
+# Move to the Recommended Model Path
+```
+cd ~/yolov7/model_convert
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/yolov7/FP32/
+sudo cp yolov7.bin yolov7.mapping yolov7.xml /opt/openvino_toolkit/models/convert/public/yolov7/FP32/
+```
+
+# yolov7 optimize to yolov7-int8
+To optimize yolov7 to yolov7-int8, refer to:
+https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/226-yolov7-optimization
+
+The installation guide:
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
+
+# FAQ
+
+
+
+How to install the python3-venv package?
+
+On Debian/Ubuntu systems, you need to install the python3-venv package using the following commands.
+```
+apt-get update
+apt-get install python3-venv
+```
+You may need to use sudo with that command. After installing, recreate your virtual environment.
+
+
diff --git a/doc/quick_start/tutorial_for_yolov8_converted.md b/doc/quick_start/tutorial_for_yolov8_converted.md
new file mode 100644
index 00000000..5d9793fe
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov8_converted.md
@@ -0,0 +1,99 @@
+# Tutorial_For_yolov8_Converted
+
+# Introduction
+Ultralytics YOLOv8 is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility.
+YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation,
+image classification and pose estimation tasks.
+This document describes a method to convert YOLOv8 nano PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR
+files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv8 for deployment in practical applications.
+
+## Documentation
+
+See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, validation, prediction and deployment.
+
+
+#### Install
+
+Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) in a [**Python>=3.7**](https://www.python.org/) environment with [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+
+```bash
+mkdir -p yolov8 && cd yolov8
+apt install python3.8-venv
+python3 -m venv openvino_env
+source openvino_env/bin/activate
+pip install ultralytics
+```
+
+#### Train
+Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments, see the Configuration page.
+YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command:
+
+```bash
+# Build a new model from YAML and start training from scratch
+yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640
+
+# Start training from a pretrained *.pt model
+yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
+```
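+
+The same training can be driven from Python with the ultralytics API installed above; a minimal sketch:
+```python
+# Python equivalent of the CLI training commands above.
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")  # or "yolov8n.yaml" to start from scratch
+model.train(data="coco128.yaml", epochs=100, imgsz=640)
+```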
+
+
+#### Val
+
+Validate trained YOLOv8n model accuracy on the COCO128 dataset. No arguments need to be passed, as the model retains its training data and arguments as model attributes.
+```bash
+# val official model
+yolo detect val model=yolov8n.pt
+```
+
+#### Predict
+Use a trained YOLOv8n model to run predictions on images.
+```bash
+# predict with official model
+yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
+```
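+
+Prediction is also available through the Python API; a short sketch, assuming the same pretrained weights:
+```python
+# Python equivalent of the CLI predict command above.
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")
+results = model.predict(source="https://ultralytics.com/images/bus.jpg")
+print(results[0].boxes)  # detected boxes for the first image
+```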
+
+#### Export
+Export a YOLOv8n model to a different format, such as ONNX or OpenVINO IR.
+```bash
+# export official model to OpenVINO IR
+yolo export model=yolov8n.pt format=openvino
+```
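+
+The export can likewise be done from Python; a sketch of the same step:
+```python
+# Python equivalent of the CLI export command above; writes the
+# yolov8n_openvino_model/ directory used in the next section.
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")
+model.export(format="openvino")
+```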
+
+# Move to the Recommended Model Path
+```bash
+cd yolov8n_openvino_model
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+sudo cp yolov8* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+```
+
+# yolov8n optimize to yolov8n-int8
+To optimize yolov8n into yolov8n-int8, refer to this notebook:
+
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/notebooks/230-yolov8-optimization/230-yolov8-optimization.ipynb
+
+The installation guide:
+
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
+# FAQ
+
+Reference links:
+
+* https://github.com/ultralytics/ultralytics
+* https://docs.ultralytics.com/tasks/detect/#predict
+
+
+
diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md
new file mode 100644
index 00000000..b6a08a2a
--- /dev/null
+++ b/doc/quick_start/yaml_configuration_guide.md
@@ -0,0 +1,130 @@
+# Introduction
+
+The contents of the .yaml config file should be well structured and follow the supported rules and entity names.
+
+# Sample
+## [pipeline_people.yaml](../../sample/param/pipeline_people.yaml)
+```bash
+Pipelines:
+- name: people
+ inputs: [StandardCamera]
+ infers:
+ - name: FaceDetection
+ model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml
+ engine: CPU
+ label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels
+ batch: 1
+ confidence_threshold: 0.5
+ enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
+ - name: AgeGenderRecognition
+ model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml
+ engine: CPU
+ label: to/be/set/xxx.labels
+ batch: 16
+ - name: EmotionRecognition
+ model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
+ engine: CPU
+ label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels
+ batch: 16
+ - name: HeadPoseEstimation
+ model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
+ engine: CPU
+ label: to/be/set/xxx.labels
+ batch: 16
+ outputs: [ImageWindow, RosTopic, RViz]
+ connects:
+ - left: StandardCamera
+ right: [FaceDetection]
+ - left: FaceDetection
+ right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz]
+ - left: AgeGenderRecognition
+ right: [ImageWindow, RosTopic, RViz]
+ - left: EmotionRecognition
+ right: [ImageWindow, RosTopic, RViz]
+ - left: HeadPoseEstimation
+ right: [ImageWindow, RosTopic, RViz]
+
+Common:
+```
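+
+Since the file is plain YAML, its structure can be sanity-checked before launching; the sketch below (assuming PyYAML is available) walks the pipeline entities and connection topology described in the following sections:
+```python
+# Inspect a pipeline config: name, inputs, inference nodes, and topology.
+import yaml
+
+with open("pipeline_people.yaml") as f:
+    cfg = yaml.safe_load(f)
+
+pipe = cfg["Pipelines"][0]
+print("pipeline:", pipe["name"], "inputs:", pipe["inputs"])
+print("infers:", [i["name"] for i in pipe["infers"]])
+for edge in pipe["connects"]:
+    print(edge["left"], "->", edge["right"])
+```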
+## Interface Description
+
+### Specify pipeline name
+The name value of this pipeline can be any value other than null.
+
+### Specify inputs
+**Note:** The input parameter can only have one value.
+Currently, options for inputs are:
+
+|Input Option|Description|Configuration|
+|--------------------|------------------------------------------------------------------|-----------------------------------------|
+|StandardCamera|Any RGB camera with a USB port. Currently only the first USB camera is used if several are connected.|```inputs: [StandardCamera]```|
+|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling the RealSense Camera via the librealsense plugin of OpenCV.|```inputs: [RealSenseCamera]```|
+|RealSenseCameraTopic| Any ROS topic which is structured as an image message.|```inputs: [RealSenseCameraTopic]```|
+|Image| Any image file which can be parsed by OpenCV, such as .png or .jpeg.|```inputs: [Image]```|
+|Video| Any video file which can be parsed by OpenCV.|```inputs: [Video]```|
+|IpCamera| Any RTSP server which can push a video stream.|```inputs: [IpCamera]```|
+
+**Note:** Please refer to this opensource repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install RTSP server for IpCamera input.
+
+### Specify input_path
+The input_path needs to be specified when the input is Image, Video, or IpCamera.
+
+|Input Option|Configuration|
+|--------------------|------------------------------------------------------------------|
+|Image|```input_path: to/be/set/image_path```|
+|Video|```input_path: to/be/set/video_path```|
+|IpCamera|```input_path: "rtsp://localhost/test"```|
+
+### Specify infers
+The Inference Engine is a set of C++ classes that provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices.
+
+* #### name
+The name of the inference engine needs to be specified here. Currently, the following inference features are supported:
+
+|Inference|Description|
+|-----------------------|------------------------------------------------------------------|
+|FaceDetection|Object detection task applied to face recognition using a sequence of neural networks.|
+|EmotionRecognition| Emotion recognition based on detected face image.|
+|AgeGenderRecognition| Age and gender recognition based on detected face image.|
+|HeadPoseEstimation| Head pose estimation based on detected face image.|
+|ObjectDetection| Object detection based on SSD-based trained models.|
+|VehicleDetection| Vehicle and passenger detection based on Intel models.|
+|ObjectSegmentation| Object detection and segmentation.|
+|ObjectSegmentationMaskrcnn| Object segmentation based on the Mask R-CNN model.|
+
+* #### model
+The path of the model needs to be specified here, pointing to the IR file generated by the model optimizer.
+
+* #### engine
+**Note:** Currently, only CPU and GPU are supported.
+Target device options are:
+
+|Target Device|
+|-----------------------|
+|CPU|
+|Intel® Integrated Graphics|
+|FPGA|
+|Intel® Movidius™ Neural Compute Stick|
+
+* #### label
+Currently, this parameter does not work.
+
+* #### batch
+Enable dynamic batch size for the inference engine net.
+
+### Specify outputs
+**Note:** The output parameter can have one or more values.
+Currently, the output options are:
+
+|Option|Description|Configuration|
+|--------------------|-----------------------------------------------------|---------------------------------------------|
+|ImageWindow| Window showing the rendered results|```outputs: [ImageWindow, RosTopic, RViz]```|
+|RosTopic| Publish the inference results as a ROS topic|```outputs: [ImageWindow, RosTopic, RViz]```|
+|RViz| Display the results in RViz|```outputs: [ImageWindow, RosTopic, RViz]```|
+
+### Specify confidence_threshold
+Set the threshold of detection probability.
+
+### Specify connects
+The topology of a pipeline can only have one value on the left and multiple values on the right. The value of the first left node should be the same as the specified **inputs**.
diff --git a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md b/doc/tables_of_contents/Design_Architecture_and_logic_flow.md
deleted file mode 100644
index 86c48bb3..00000000
--- a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Design Architecture
-From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture.
-
-
-
-- **Intel® OpenVINO™ toolkit** is leveraged to provide deep learning basic implementation for data inference. is free software that helps developers and data scientists speed up computer vision workloads, streamline deep learning inference and deployments,
-and enable easy, heterogeneous execution across Intel® platforms from edge to cloud. It helps to:
- - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel.
- - Unleash convolutional neural network (CNN)-based deep learning inference using a common API.
- - Speed development using optimized OpenCV* and OpenVX* functions.
-- **ROS2 OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use.
-- **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework.
-- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results.
-- **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing.
-
-# Logic Flow
-From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched.
-
-
-
-Once a corresponding program is launched with a specified .yaml config file passed in the .launch.py file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering.
-
-The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [the configuration guidance](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) for how to create or edit the config files.
-
-**Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc.
-
-**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy.
diff --git a/doc/tables_of_contents/prerequisite.md b/doc/tables_of_contents/prerequisite.md
deleted file mode 100644
index f42279d7..00000000
--- a/doc/tables_of_contents/prerequisite.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Development and Target Platform
-
->> The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use.
-
-## Hardware
-### Processor Supported:
-- Intel architecture processor, e.g. 6th~8th generation Intel® Core™
-- Intel® Xeon® v5 family
-- Intel® Xeon® v6 family
-- Intel® Pentium® processor N4200/5, N3350/5, N3450/5 with Intel® HD Graphics
-
-**Notes**:
-- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor.
-- A chipset that supports processor graphics is required for Intel® Xeon® processors.
-- Use one of the following methods to determine the GPU on your hardware:
- * [lspci] command: GPU info may lie in the [VGA compatible controller] line.
- * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information.
- * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation.
-
-### Pripheral Depended:
-- Intel® Movidius™ Neural Compute Stick
-- Intel® Neural Compute Stick 2
-- Intel® Vision Accelerator Design with Intel® Movidius™ VPU
-- RGB Camera, e.g. RealSense D400 Series or standard USB camera
-
-## Operating Systems
-- Ubuntu 16.04 or 18.04 long-term support (LTS), 64-bit: Minimum supported kernel is 4.14
-- CentOS 7.4, 64-bit (for target only)
-- Yocto Project Poky Jethro v2.0.3, 64-bit (for target only and requires modifications)
-
-**Note**: Since **Ubuntu 18.04** in the list is the only one well supported by ROS2 core, it is highly recommended to use as the OS.
diff --git a/doc/tables_of_contents/supported_features/Supported_features.md b/doc/tables_of_contents/supported_features/Supported_features.md
deleted file mode 100644
index 3117ac71..00000000
--- a/doc/tables_of_contents/supported_features/Supported_features.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Supported Features
-## Input Resources
-Currently, the package supports RGB frame data from several kinds of input resources:
-- Standard USB Camera
-- Realsense Camera
-- Image Topic
-- Image File
-- Video File
-
-See more from [the input resource description](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/input_resource.md).
-
-## Inference Implementations
-Inferences shown in below list are supported:
-- Face Detection
-- Emotion Recognition
-- Age and Gender Recognition
-- Head Pose Estimation
-- Object Detection
-- Vehicle and License Detection
-- Object Segmentation
-- Person Re-Identification
-- Face Re-Identification
-
-[Inference functionality overview](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/inference_functionality_overview.md).
-
-## Output Types
-The inference results can be output in several types. One or more types can be enabled for any infernece pipeline:
-- Topic Publishing
-- Image View Window
-- RViz Showing
-- Service (as a mechanism responding user's request about object detection results.)
-
-See more from [output types](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/output_types.md) page.
diff --git a/doc/tables_of_contents/supported_features/inference_functionality_overview.md b/doc/tables_of_contents/supported_features/inference_functionality_overview.md
deleted file mode 100644
index 35afb571..00000000
--- a/doc/tables_of_contents/supported_features/inference_functionality_overview.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Infernece Feature List
-Currently, the inference feature list is supported:
-
-|Inference Label|Description|Outputs Topic|
-|---|---|---|
-|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
-|EmotionRecognition| Emotion recognition based on detected face image.|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))|
-|AgeGenderRecognition| Age and gener recognition based on detected face image.|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))|
-|HeadPoseEstimation| Head pose estimation based on detected face image.|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))|
-|ObjectDetection| object detection based on SSD-based trained models.|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
-|VehicleAttribsDetection| Vehicle detection based on Intel models.|```/ros2_openvino_toolkit/detected_vehicles_attribs```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))|
-|LicensePlateDetection| License detection based on Intel models.|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))|
-|ObjectSegmentation| object detection and segmentation.|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))|
-|PersonReidentification| Person Reidentification based on object detection.|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))|
-|LandmarksDetection| Landmark regression based on face detection.|```/ros2_openvino_toolkit/detected_landmarks```([people_msgs::msg::LandmarkStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LandmarkStamped.msg))|
-|FaceReidentification| Face Reidentification based on face detection.|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))|
diff --git a/doc/tables_of_contents/supported_features/input_resource.md b/doc/tables_of_contents/supported_features/input_resource.md
deleted file mode 100644
index 43cd3af0..00000000
--- a/doc/tables_of_contents/supported_features/input_resource.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Full list of supported Input Resources
-|Input Resource Name|Description|
-|---|-------------------------------------------|
-|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.|
-|RealSenseCamera| Intel RealSense RGB-D Camera,directly calling RealSense Camera via librealsense plugin of openCV.|
-|RealSenseCameraTopic| any ROS topic which is structured in image message.The topic to be inputted must be remapped to name ```/openvino_toolkit/image_raw```(type [sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg))|
-|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.|
-|Video| Any video file which can be parsed by openCV.|
\ No newline at end of file
diff --git a/doc/tables_of_contents/supported_features/output_types.md b/doc/tables_of_contents/supported_features/output_types.md
deleted file mode 100644
index 315c0cb9..00000000
--- a/doc/tables_of_contents/supported_features/output_types.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Output Types
->> The inference results can be output in several types. One or more types can be enabled for any inference pipeline.
-## Topic Publishing
->> Specific topic(s) can be generated and published according to the given inference functionalities.
-
-|Inference|Published Topic|
-|---|---|
-|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
-|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))|/ros2_openvino_toolkit/face_detection(object_msgs:msg:ObjectsInBoxes)
-|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))|
-|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))|
-|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
-|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))|
-|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))|
-|Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))|
-|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))|
-|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))|
-
-## Image View Window
->> The original image and the inference results are rendered together and shown in a CV window.
-## RViz Showing
->> The Rendered image (rendering inference results into the original image) was transformed into sensor_msgs::msg::Image topic, that can be shown in RViz application.
-- RViz Published Topic
-```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg))
-
-## Service
->> Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
-
-- **Face Detection or Object Detection for a given Image file**
-
-|Inference|Service|
-|---|---|
-|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
-|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
-|Age Gender Detection Service|```/detect_age_gender```([people_msgs::srv::AgeGender](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/AgeGender.srv))|
-|Headpose Detection Service|```/detect_head_pose```([people_msgs::srv::HeadPose](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/HeadPose.srv))|
-|Emotion Detection Service|```/detect_emotion```([people_msgs::srv::Emotion](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/Emotion.srv))|
-
-- **Inference Pipeline Lifecycle Management**
- - Create new pipeline
- - Start/Stop/Pause a pipeline
- - Get pipeline list or status
-
diff --git a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md b/doc/tables_of_contents/tutorials/Multiple_Pipelines.md
deleted file mode 100644
index cd03aec7..00000000
--- a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Multiple Pipelines
->> This is a way to run more than one pipeline in the same process.Having multiple pipelines in a single instance allows each pipeline to have custom configuration and different performance.
-
-## prerequest
-see [this guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) to see how to customize a pipeline.
-
-## A demo for multiple pipeline
-```bash
-1 Pipelines:
- 2 - name: object1
- 3 inputs: [StandardCamera]
- 4 infers:
- 5 - name: ObjectDetection
- 6 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml
- 7 engine: CPU
- 8 label: to/be/set/xxx.labels
- 9 batch: 1
- 10 confidence_threshold: 0.5
- 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
- 12 outputs: [ImageWindow, RosTopic, RViz]
- 13 connects:
- 14 - left: StandardCamera
- 15 right: [ObjectDetection]
- 16 - left: ObjectDetection
- 17 right: [ImageWindow]
- 18 - left: ObjectDetection
- 19 right: [RosTopic]
- 20 - left: ObjectDetection
- 21 right: [RViz]
- 22
- 23 - name: object2
- 24 inputs: [RealSenseCamera]
- 25 infers:
- 26 - name: ObjectDetection
- 27 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml
- 28 engine: CPU
- 29 label: to/be/set/xxx.labels
- 30 batch: 1
- 31 confidence_threshold: 0.5
- 32 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
- 33 outputs: [ImageWindow, RosTopic, RViz]
- 34 connects:
- 35 - left: RealSenseCamera
- 36 right: [ObjectDetection]
- 37 - left: ObjectDetection
- 38 right: [ImageWindow]
- 39 - left: ObjectDetection
- 40 right: [RosTopic]
- 41 - left: ObjectDetection
- 42 right: [RViz]
- 43
- 44 OpenvinoCommon:
-
-```
diff --git a/doc/tables_of_contents/tutorials/configuration_file_customization.md b/doc/tables_of_contents/tutorials/configuration_file_customization.md
deleted file mode 100644
index 703459b6..00000000
--- a/doc/tables_of_contents/tutorials/configuration_file_customization.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Configuration File Customization
-
-One of the key added values of ROS2 OpenVINO is automatically create new pipeline on demand according to the given configuration files. In order to create new pipelines, the end user only need to create a new configuration file or update one already existed. The configuration file must be written by following some rules.
-
- 1 Pipelines:
- 2 - name: object
- 3 inputs: [RealSenseCamera]
- 4 infers:
- 5 - name: ObjectDetection
- 6 model: /opt/intel/openvino/deployment_tools/tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml
- 7 engine: MYRIAD
- 8 label: to/be/set/xxx.labels
- 9 batch: 1
- 10 confidence_threshold: 0.5
- 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
- 12 outputs: [ImageWindow, RosTopic, RViz]
- 13 connects:
- 14 - left: RealSenseCamera
- 15 right: [ObjectDetection]
- 16 - left: ObjectDetection
- 17 right: [ImageWindow]
- 18 - left: ObjectDetection
- 19 right: [RosTopic]
- 20 - left: ObjectDetection
- 21 right: [RViz]
-
-In this sample, a pipeline is to be created with this topology:
-
-```flow
-input=operation: RealSenseCamera
-infer=operation: ObjectDetection
-output1=operation: ImageWindow
-output2=operation: RosTopic
-output3=operation: RViz
-
-input-infer-output1
-infer-output2
-infer-output3
-```
-
-Detail Description for each line shows in below tabel:
-
-|Line No.|Description|
-|-------------|---|
-| 1 |Keyword, label for pipeline parameters. The pipeline configuration must be started by this line.|
-|2|Pipeline name, the published topics bound to this name. (e.g. /openvino_toolkit/**object**/face_detection)|
-|3|The name of chosen input device, should be one and only one of [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#input-resources) (taking the item "Input Resource Name").|
-|4|key word for inference section. one or more inferences can be included in a pipeline's inference section.|
-|5|The name of Inference instance, should be in [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations).<br>**NOTE**: if a pipeline contains 2 or more inference instances, the first one should be a detection inference.|
-|6|Model description file with absolute path, generated by model_optimizer tool|
-|7|The name of Inference engine, should be one of:CPU, GPU and MYRIAD.|
-|8|The file name with absolute path of object labels.<br>**NOTE**: not enabled in the current version. The labels file with the same name as model description file under the same folder is searched and used.|
-|9|The number of input data to be enqueued and handled by inference engine in parallel.|
-|10|Set the inference result filtering by confidence ratio.|
-|11|set *enable_roi_constraint* to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame.|
-|12|A list of output method enabled for inference result showing/notifying. Should be one or some of:<br>• ImageWindow<br>• RosTopic<br>• Rviz<br>• RosService(*)<br>**NOTE**: RosService can only be used in ROS2 service server pipeline.|
-|13|keyword for pipeline entities' relationship topology.|
-|14~21|The detailed connection topology for the pipeline.<br>A pair of "left" and "right" parameters, whose contents are the names of inputs(line3), infers(line5) and outputs(line12) defines a connection between the two entities, it also defines that the data would be moved from *entity left* to *entity right*.|
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 00000000..2ee938b7
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,52 @@
+ARG ROS_PRE_INSTALLED_PKG
+FROM osrf/ros:${ROS_PRE_INSTALLED_PKG}
+ARG VERSION
+
+SHELL ["/bin/bash", "-c"]
+
+# ignore the warning
+ARG DEBIAN_FRONTEND=noninteractive
+ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1
+
+# install openvino 2022.3
+# https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html
+RUN apt update && apt install --assume-yes curl wget gnupg2 lsb-release \
+&& wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && \
+apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && echo "deb https://apt.repos.intel.com/openvino/2022 focal main" | tee /etc/apt/sources.list.d/intel-openvino-2022.list && \
+apt update && apt-cache search openvino && apt install -y openvino-2022.3.0
+
+
+# install librealsense2
+RUN apt-get install -y --no-install-recommends \
+software-properties-common \
+&& apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE \
+&& add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u \
+&& apt-get install -y --no-install-recommends \
+librealsense2-dkms \
+librealsense2-utils \
+librealsense2-dev \
+librealsense2-dbg \
+libgflags-dev \
+libboost-all-dev \
+&& rm -rf /var/lib/apt/lists/*
+
+# other dependencies
+RUN apt-get update && apt-get install -y python3-pip && python3 -m pip install -U \
+numpy \
+networkx \
+pyyaml \
+requests \
+&& apt-get install --assume-yes apt-utils \
+&& apt-get install -y --no-install-recommends libboost-all-dev \
+&& apt-get install -y ros-${VERSION}-diagnostic-updater \
+&& pip install --upgrade pip
+RUN cd /usr/lib/x86_64-linux-gnu && ln -sf libboost_python-py36.so libboost_python37.so
+COPY jpg /root/jpg
+# build ros2 openvino toolkit
+RUN cd /root && mkdir -p catkin_ws/src && cd /root/catkin_ws/src \
+&& git clone https://github.com/intel/ros2_object_msgs.git
+WORKDIR /root/catkin_ws/src
+RUN git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git
+WORKDIR /root/catkin_ws
+RUN source /opt/ros/${VERSION}/setup.bash && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release
+
diff --git a/docker/docker_instructions_ov2.0.md b/docker/docker_instructions_ov2.0.md
new file mode 100644
index 00000000..c9cdd202
--- /dev/null
+++ b/docker/docker_instructions_ov2.0.md
@@ -0,0 +1,130 @@
+# Run Docker Images For ROS2_OpenVINO_Toolkit
+
+**NOTE:**
+The steps below have been tested on **Ubuntu 20.04**.
+Supported ROS2 versions include foxy and galactic.
+
+## 1. Environment Setup
+* Install docker.
+Refer to: [Docker_install_guide](https://docs.docker.com/engine/install/ubuntu/)
+
+## 2. Build docker image by dockerfile
+```
+cd ~/ros2_openvino_toolkit/docker
+vi ~/ros2_openvino_toolkit/docker/Dockerfile
+docker build --build-arg ROS_PRE_INSTALLED_PKG=<ros2_pre_installed_pkg> --build-arg VERSION=<ros2_version> --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_openvino_202203 .
+```
+For example:
+* Build image for ros_galactic
+```
+cd ~/ros2_openvino_toolkit/docker
+vi ~/ros2_openvino_toolkit/docker/Dockerfile
+docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_galactic_openvino_202203 .
+```
+* Build image for ros_foxy
+```
+cd ~/ros2_openvino_toolkit/docker
+vi ~/ros2_openvino_toolkit/docker/Dockerfile
+docker build --build-arg ROS_PRE_INSTALLED_PKG=foxy-desktop --build-arg VERSION=foxy --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_foxy_openvino_202203 .
+```
+
+## 3. Download and load docker image
+* Download docker image
+```
+ # ros2_openvino_202203 for demo
+ cd ~/Downloads/
+ wget
+```
+* Load docker image
+```
+cd ~/Downloads/
+docker load -i
+docker images
+# (the loaded image shows in the list)
+```
+
+## 4. Running the Demos
+* Install dependency
+```
+ sudo apt install x11-xserver-utils
+ xhost +
+```
+* Run docker image
+```
+ docker images
+ docker run -itd -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix -v /dev:/dev --privileged=true --name
+```
+* In Docker Container
+
+* Preparation
+```
+source /opt/ros/<ros2_version>/setup.bash
+cd ~/catkin_ws
+source ./install/local_setup.bash
+```
+
+* See all available models
+OMZ tools are provided for downloading and converting OMZ models in ov2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/)
+
+```
+omz_downloader --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of model (execute once), for example:
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If a model (TensorFlow, Caffe, MXNet, ONNX, Kaldi) needs to be converted to an intermediate representation (such as the model for object detection):
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
+* Copy label files (execute once)
+**Note**: You need to create the label directories first if you skipped the output_dirs setup steps above.
+```
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+```
+
+* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before launching and make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start [yaml configuration guide](../doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+ * run face detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_people.launch.py
+ ```
+ * run person reidentification sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_reidentification.launch.py
+ ```
+ * run face detection sample code input from Image.
+ ```
+ ros2 launch openvino_node pipeline_image.launch.py
+ ```
+ * run object segmentation sample code input from RealSenseCameraTopic.
+ ```
+ ros2 launch openvino_node pipeline_segmentation.launch.py
+ ```
+ * run object segmentation sample code input from Image.
+ ```
+ ros2 launch openvino_node pipeline_segmentation_image.launch.py
+ ```
+ * run vehicle detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_vehicle_detection.launch.py
+ ```
+ * run person attributes sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_person_attributes.launch.py
+ ```
+
+# More Information
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+
+###### *Any security issue should be reported using process at https://01.org/security*
+
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp
deleted file mode 100644
index ed5923f3..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2018-2019 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @brief A header file with declaration for NetworkEngine class
- * @file engine.h
- */
-#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_
-#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_
-
-#pragma once
-
-#include "dynamic_vino_lib/models/base_model.hpp"
-#include "dynamic_vino_lib/engines/engine.hpp"
-#include "inference_engine.hpp"
-
-namespace Engines
-{
-/**
- * @class EngineManager
- * @brief This class is used to create and manage Inference engines.
- */
-class EngineManager
-{
-public:
- /**
- * @brief Create InferenceEngine instance by given Engine Name and Network.
- * @return The shared pointer of created Engine instance.
- */
- std::shared_ptr createEngine(
- const std::string &, const std::shared_ptr &);
-
-private:
-#if(defined(USE_OLD_E_PLUGIN_API))
- std::map plugins_for_devices_;
- std::unique_ptr
- makePluginByName(
- const std::string & device_name, const std::string & custom_cpu_library_message,
- const std::string & custom_cldnn_message, bool performance_message);
- std::shared_ptr createEngine_beforeV2019R2(
- const std::string &, const std::shared_ptr &);
-#endif
-
- std::shared_ptr createEngine_V2019R2_plus(
- const std::string &, const std::shared_ptr &);
-
-};
-} // namespace Engines
-
-#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp
deleted file mode 100644
index ec46271e..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @brief A header file with declaration for BaseFilter Class
- * @file base_filter.hpp
- */
-#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_
-#define DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_
-
-#include
-#include
-#include
-#include
-#include "dynamic_vino_lib/inferences/base_inference.hpp"
-
-namespace dynamic_vino_lib
-{
-
-/**
- * @class BaseFilter
- * @brief Base class for result filter.
- */
-class BaseFilter
-{
-public:
- BaseFilter();
- /**
- * @brief Initiate a result filter.
- */
- virtual void init() = 0;
-
- /**
- * @brief Get the filtered results' ROIs.
- * @return The filtered ROIs.
- */
- virtual std::vector getFilteredLocations() = 0;
-
- /**
- * @brief Check if the filter conditions is valid for filtering.
- * @param[in] Filter conditions.
- * @return true if some of the conditions are valid, otherwise false.
- */
- bool isValidFilterConditions(const std::string &);
-
- /**
- * @brief Accept the filter conditions for filtering.
- * @param[in] Filter conditions.
- */
- void acceptFilterConditions(const std::string &);
-
- /**
- * @brief Decide whether the input string is a relational operator or not.
- * @param[in] A string to be decided.
- * @return True if the input string is a relational operator, false if not.
- */
- bool isRelationOperator(const std::string &);
-
- /**
- * @brief Decide whether the input string is a logic operator or not.
- * @param[in] A string to be decided.
- * @return True if the input string is a logic operator, false if not.
- */
- bool isLogicOperator(const std::string &);
-
- /**
- * @brief Decide whether the an operator has a higher priority than anthor.
- * @param[in] The two operators.
- * @return True if the first operator has higher priority, false if not.
- */
- bool isPriorTo(const std::string &, const std::string &);
-
- /**
- * @brief Convert the input bool variable to a string type.
- * @param[in] A bool type to be converted.
- * @return A converted string result.
- */
- std::string boolToStr(bool);
-
- /**
- * @brief Convert the input string variable to a bool type.
- * @param[in] A string type to be converted.
- * @return A converted bool result.
- */
- bool strToBool(const std::string &);
-
- /**
- * @brief Get the filter conditions in the suffix order.
- * @return A vector with suffix-order filter conditions.
- */
- const std::vector & getSuffixConditions() const;
-
- /**
- * @brief Do logic operation with the given bool values and the operator.
- * @param[in] A bool string, an logic operator, the other bool string.
- * @return The logic operation result.
- */
- bool logicOperation(const std::string &, const std::string &, const std::string &);
-
- /**
- * @brief Compare two strings with a given relational operator.
- * @param[in] A string, an relational operator, the other string.
- * @return True if valid, false if not.
- */
- static bool stringCompare(const std::string &, const std::string &, const std::string &);
-
- /**
- * @brief Compare two floats with a given relational operator.
- * @param[in] A float number, an relational operator, the other float number.
- * @return True if valid, false if not.
- */
- static bool floatCompare(float, const std::string &, float);
-
- /**
- * @brief Convert a string into a float number.
- * @param[in] A string to be converted.
- * @return The converted float number, 0 if string is invalid.
- */
- static float stringToFloat(const std::string &);
-
- /**
- * @brief A macro to decide whether a given result satisfies the filter condition.
- * @param[in] A key to function mapping, a given result.
- * @return True if valid, false if not.
- */
- #define ISVALIDRESULT(key_to_function, result) \
- { \
- std::vector suffix_conditons = getSuffixConditions(); \
- std::stack result_stack; \
- for (auto elem : suffix_conditons) { \
- if (!isRelationOperator(elem) && !isLogicOperator(elem)) { \
- result_stack.push(elem); \
- } else { \
- try { \
- std::string str1 = result_stack.top(); \
- result_stack.pop(); \
- std::string str2 = result_stack.top(); \
- result_stack.pop(); \
- if (key_to_function.count(str2)) { \
- result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \
- } else { \
- result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \
- } \
- } \
- catch (...) { \
- slog::err << "Invalid filter conditions format!" << slog::endl; \
- } \
- } \
- } \
- if (result_stack.empty()) { \
- return true; \
- } \
- return strToBool(result_stack.top()); \
- }
-
-private:
- /**
- * @brief Parse the filter conditions and stores it into a vector.
- * @param[in] A string form filter conditions.
- * @return The vector form filter conditions.
- */
- std::vector split(const std::string & filter_conditions);
-
- /**
- * @brief Convert the infix expression into suffix expression.
- * @param[in] The infix form filter conditions.
- */
- void infixToSuffix(std::vector&infix_conditions);
-
- /**
- * @brief Strip the extra space in a string.
- * @param[in] A string to be striped.
- * @return The striped string.
- */
- std::string strip(const std::string & str);
-
- std::string striped_conditions_ = "";
- std::vector suffix_conditons_;
- std::vector relation_operators_ = {"==", "!=", "<=", ">=", "<", ">"};
- std::vector logic_operators_ = {"&&", "||"};
-};
-} // namespace dynamic_vino_lib
-
-#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp
deleted file mode 100644
index 0966a96a..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @brief a header file with declaration of Inference Manager class
- * @file inference_manager.hpp
- */
-#ifndef DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_
-#define DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_
-
-#include
-#include
-#include
-#include