diff --git a/.ci_local_test/Jenkinsfile b/.ci_local_test/Jenkinsfile
index 8dcb84ac..2264f86e 100644
--- a/.ci_local_test/Jenkinsfile
+++ b/.ci_local_test/Jenkinsfile
@@ -3,28 +3,62 @@ pipeline {
     environment {
         // Test_Server is the local test machine.
         Test_Server = "robotics-testNUC11"
-        WORKSPACE_PATH = "/home/intel/ros2_openvino_toolkit"
+        Test_WORKSPACE = "/home/intel/ros2_openvino_toolkit_test"
     }
     stages {
-        stage('Test Ros2 Galatic') {
+        stage('Check The Conflict') {
             steps {
                 script {
-                    def flag = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && docker images | grep ros2_openvino_test'", returnStatus: true
-                    if (flag == 0) {
-                        docker rmi -f ros2_openvino_test
-                    }
-                    def test_result = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && ./self_host_test_ros2_openvino.sh '", returnStatus: true
+                    sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./check_conflict.sh'", returnStatus: true
+                    echo "no conflict, the task continues"
+                }
+            }
+        }
+        stage('Get The Env') {
+            steps {
+                script {
+                    // remove the old env file
+                    sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/env'", returnStatus: true
+                    // capture the current build environment
+                    sh script: "export | tee -a env", returnStatus: true
+                    sh script: "scp -r env intel@$Test_Server:$Test_WORKSPACE", returnStatus: true
+                }
+            }
+        }
+        stage('Moving The Code To The Test Machine') {
+            steps {
+                script {
+                    sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/ros2_openvino_toolkit'", returnStatus: true
+                    sh script: "scp -r $WORKSPACE intel@$Test_Server:$Test_WORKSPACE/ros2_openvino_toolkit", returnStatus: true
+                    // sh script: "ssh intel@$Test_Server 'docker cp $Test_WORKSPACE/ros2_openvino_toolkit:/root/catkin_ws/src'", returnStatus: true
+                }
+            }
+        }
+        stage('Klocwork Code Check') {
+            steps {
+                script {
+                    echo 'klocwork code check'
+                    sh script: "sudo docker cp $WORKSPACE klocwork_test:/home/intel/catkin_ws/src/ros2_openvino_toolkit", returnStatus: true
+                    sh script: "sudo docker exec -i klocwork_test bash -c 'source ~/.bashrc && cd catkin_ws && ./klocwork_scan.sh'", returnStatus: true
+                }
+            }
+        }
+        stage('Run The ros2_openvino Container') {
+            steps {
+                script {
+                    def test_result = sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./self_container_ros2_openvino_test.sh'", returnStatus: true
                     if (test_result == 0) {
                         echo "test pass"
                     } else {
                         echo "test fail"
                         exit -1
                     }
-
+                }
+            }
+        }
     }
 }
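`check_conflict.sh` is invoked above but is not included in this patch. A minimal sketch of what such a gate could look like, assuming its job is to refuse to start while a previous test container is still running — only the script name comes from the source; the body below is hypothetical:

```bash
#!/bin/bash
# Hypothetical sketch of check_conflict.sh -- not the actual script on the test machine.
# Exit non-zero if a previous CI container is still running, so the calling
# Jenkins stage could detect the conflict from the ssh exit status.
if docker ps --format '{{.Names}}' | grep -q '^ros2_openvino_container$'; then
    echo "conflict: a previous test container is still running"
    exit 1
fi
echo "no conflict"
```

Note that the stage captures the script's exit status with `returnStatus: true` but never checks it, so as written a detected conflict would not actually stop the run.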
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh
new file mode 100755
index 00000000..91a19139
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+export DISPLAY=:0
+
+export work_dir=$PWD
+
+
+function run_container() {
+
+    # Remove a stale image from a previous run, if present.
+    docker images | grep ros2_openvino_docker
+    if [ $? -eq 0 ]
+    then
+        echo "the image ros2_openvino_docker:01 already exists; removing it"
+        docker rmi -f ros2_openvino_docker:01
+    fi
+
+    # Remove a stale container from a previous run, if present.
+    docker ps -a | grep ros2_openvino_container
+    if [ $? -eq 0 ]
+    then
+        docker rm -f ros2_openvino_container
+    fi
+
+    # Use the ros2_openvino_toolkit code from the Jenkins server instead of the git clone in the Dockerfile.
+    cd $work_dir && sed -i '/RUN git clone -b ros2/d' Dockerfile
+    # Add the jpg files used by the tests.
+    cd $work_dir && sed -i '$i COPY jpg /root/jpg' Dockerfile
+
+    cd $work_dir && docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic -t ros2_openvino_docker:01 .
+    cd $work_dir && docker images
+    docker run -i --privileged=true --device=/dev/dri -v $work_dir/ros2_openvino_toolkit:/root/catkin_ws/src/ros2_openvino_toolkit -v $HOME/.Xauthority:/root/.Xauthority -e GDK_SCALE -v $work_dir/test_cases:/root/test_cases --name ros2_openvino_container ros2_openvino_docker:01 bash -c "cd /root/test_cases && ./run.sh galactic"
+
+}
+
+run_container
+if [ $? -ne 0 ]
+then
+    echo "Test fail"
+    exit 1
+fi
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg
new file mode 100644
index 00000000..f53b0339
Binary files /dev/null and b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg differ
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh
new file mode 100755
index 00000000..0efee6ce
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+if [[ $1 == '' ]]
+then
+    export ros2_branch=galactic
+else
+    export ros2_branch=$1
+fi
+
+export dynamic_vino_sample=/root/catkin_ws/install/openvino_node/share/openvino_node
+
+
+source /opt/ros/$ros2_branch/setup.bash
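`config.sh` is consumed by the test entrypoints below, which source it with the target distro as the first argument. For example, inside the container (paths as used by these scripts):

```bash
source /root/test_cases/config.sh galactic
echo $ros2_branch             # -> galactic
echo $dynamic_vino_sample     # -> /root/catkin_ws/install/openvino_node/share/openvino_node
```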
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh
new file mode 100755
index 00000000..e2678f36
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+mkdir -p /opt/openvino_toolkit/models
+#apt install -y python-pip
+apt install -y python3.8-venv
+cd ~ && python3 -m venv openvino_env && source openvino_env/bin/activate
+python -m pip install --upgrade pip
+pip install openvino-dev[tensorflow2,onnx]==2022.3
+
+
+# Download the optimized Intermediate Representation (IR) of the models (execute once)
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+
+
+# Copy the label files (execute once)
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+
+mkdir -p /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/
+cp /opt/openvino_toolkit/models/convert/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/* /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/
+
+cd /root/test_cases/ && ./yolov5_model_download.sh
+cd /root/test_cases/ && ./yolov8_model_download.sh
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh
new file mode 100755
index 00000000..d13dd828
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+if [[ $1 == '' ]]
+then
+    export ros2_branch=galactic
+else
+    export ros2_branch=$1
+fi
+source /root/test_cases/config.sh $ros2_branch
+
+cd /root/catkin_ws && colcon build --symlink-install
+cd /root/catkin_ws && source ./install/local_setup.bash
+
+apt-get update
+# apt-get install -y ros-$ros2_branch-diagnostic-updater
+apt-get install -y python3-defusedxml
+apt-get install -y python3-pip
+pip3 install XTestRunner==1.5.0
+
+cd /root/test_cases && ./ros2_openvino_tool_model_download.sh
+mkdir -p /root/test_cases/log
+echo "===cat pipeline_people_ci.yaml"
+cat /root/catkin_ws/install/openvino_node/share/openvino_node/param/pipeline_people_ci.yaml
+
+cd /root/test_cases/unittest && python3 run_all.py
+result=$?
+#echo "cat segmentation maskrcnn"
+#cat /root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log
+
+echo "Test ENV:" && df -h && free -g
+if [ $result -ne 0 ]
+then
+    exit 1
+fi
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py
new file mode 100755
index 00000000..09e05d3f
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+import sys
+import unittest
+from test_cases import Test_Cases
+from XTestRunner import HTMLTestRunner
+
+def main():
+
+    suite = unittest.TestSuite()
+
+    all_cases = [Test_Cases('test_1_pipeline_people_ci'),
+                 Test_Cases('test_2_pipeline_reidentification_ci'),
+                 Test_Cases('test_3_pipeline_image_ci'),
+                 Test_Cases('test_4_pipeline_segmentation_ci'),
+                 Test_Cases('test_5_pipeline_vehicle_detection_ci'),
+                 Test_Cases('test_6_pipeline_person_attributes_ci'),
+                 Test_Cases('test_7_pipeline_segmentation_image_ci'),
+                 Test_Cases('test_8_pipeline_object_yolov5_ci'),
+                 Test_Cases('test_9_pipeline_object_yolov8_ci')]
+                 #Test_Cases('test_10_pipeline_segmentation_instance_ci')]
+    suite.addTests(all_cases)
+
+    with open('./result.html', 'wb') as fp:
+        runner = HTMLTestRunner(
+            stream=fp,
+            title='ROS2 OpenVINO Test Report',
+            description='Test ROS2-galactic openvino all cases',
+            language='en',
+        )
+        result = runner.run(
+            testlist=suite,
+            rerun=1,
+            save_last_run=False
+        )
+
+    failure_count = len(all_cases) - result.success_count
+    print(f"all count: {len(all_cases)}")
+    print(f"success count: {result.success_count}")
+    print(f"failure count: {failure_count}")
+    if result.success_count == len(all_cases) and failure_count == 0:
+        print("Test ALL PASS")
+    else:
+        print("Test FAIL")
+        sys.exit(1)
+
+if __name__=="__main__":
+    main()
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py
new file mode 100755
index 00000000..a9bbb34b
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py
@@ -0,0 +1,121 @@
+#from asyncio import sleep
+from time import sleep
+import unittest
+import subprocess
+
+class Test_Cases(unittest.TestCase):
+
+    # Shared checker, deliberately not named test_* so that unittest discovery
+    # does not try to run it without arguments: launch a pipeline, echo its
+    # topics to log files, then verify the pipeline log and every topic log.
+    def check_pipeline(self, launch_file, log_file, topic_list=['/rosout']):
+        print(f"{log_file} topic_list", topic_list)
+        subprocess.Popen([f"ros2 launch openvino_node {launch_file} > {log_file} &"], shell=True)
+        for topic in topic_list:
+            name = topic.split('/')[-1]
+            sleep(3)
+            print(f"{topic} {name}.log")
+            subprocess.Popen([f"ros2 topic echo {topic} > {name}.log &"], shell=True)
+            if name == "segmented_obejcts":
+                # Echo the segmentation topic a second time, appending to the
+                # same log, to give the slow topic another chance to produce output.
+                subprocess.Popen([f"ros2 topic echo {topic} >> {name}.log &"], shell=True)
+        kill_ros2_process()
+        print("killing the test processes done")
+        with open(log_file) as handle:
+            log = handle.read()
+        check_log = log.split("user interrupted with ctrl-c (SIGINT)")[0]
+        self.assertIn('One Pipeline Created!', check_log)
+        self.assertNotIn('ERROR', check_log)
+        for topic in topic_list:
+            name = topic.split('/')[-1]
+            with open(f"{name}.log") as topic_handle:
+                topic_info = topic_handle.read()
+            if "header" not in topic_info:
+                print(f"the {launch_file} topic {name} failed")
+            else:
+                print(f"the {launch_file} topic {name} pass")
+            self.assertIn("header", topic_info)
+        print("check all done")
+
+
+    def test_1_pipeline_people_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/age_genders_Recognition", \
+                    "/ros2_openvino_toolkit/headposes_estimation", \
+                    "/ros2_openvino_toolkit/face_detection", \
+                    "/ros2_openvino_toolkit/emotions_recognition"]
+        launch_file = "pipeline_people_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_people_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_2_pipeline_reidentification_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/reidentified_persons",]
+        launch_file = "pipeline_reidentification_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_reidentification_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_3_pipeline_image_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/emotions_recognition", \
+                    "/ros2_openvino_toolkit/headposes_estimation", \
+                    "/ros2_openvino_toolkit/people/age_genders_Recognition"]
+        launch_file = "pipeline_image_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_image_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_4_pipeline_segmentation_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+        launch_file = "pipeline_segmentation_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_segmentation_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_5_pipeline_vehicle_detection_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/detected_license_plates",
+                    "/ros2_openvino_toolkit/detected_vehicles_attribs"]
+        launch_file = "pipeline_vehicle_detection_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_vehicle_detection_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_6_pipeline_person_attributes_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/detected_objects", \
+                    "/ros2_openvino_toolkit/person_attributes"]
+        launch_file = "pipeline_person_attributes_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_person_attributes_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_7_pipeline_segmentation_image_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+        launch_file = "pipeline_segmentation_image_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_segmentation_image_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_8_pipeline_object_yolov5_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/detected_objects"]
+        launch_file = "pipeline_object_yolov5_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_object_yolov5_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_9_pipeline_object_yolov8_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/detected_objects"]
+        launch_file = "pipeline_object_yolov8_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_object_yolov8_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+    def test_10_pipeline_segmentation_instance_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+        launch_file = "pipeline_segmentation_instance_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_segmentation_instance.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+
+    @unittest.skip("skip case")
+    def test_9_pipeline_segmentation_maskrcnn_ci(self):
+        topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"]
+        launch_file = "pipeline_segmentation_maskrcnn_ci_test.py"
+        log_file = "/root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log"
+        self.check_pipeline(launch_file, log_file, topic_list=topic_ls)
+
+
+def kill_ros2_process(sleep_z=30):
+    # Give the pipeline and the topic echoes time to produce output,
+    # then kill every remaining ros2 process.
+    sleep(sleep_z)
+    process_result = subprocess.Popen(["ps -ef | grep ros2 | grep -v 'grep' | awk '{print $2}'"], stdout=subprocess.PIPE, shell=True).communicate()
+    print(process_result[0].decode('utf-8').replace('\n', ' '))
+    kill_process = 'kill -9 ' + process_result[0].decode('utf-8').replace('\n', ' ')
+    subprocess.Popen([kill_process], shell=True).communicate()
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh
new file mode 100755
index 00000000..f3e50d3b
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+#1. Clone the YOLOv5 repository from GitHub
+cd /root && git clone https://github.com/ultralytics/yolov5.git
+
+# Set up the environment for installing YOLOv5
+cd yolov5
+python3 -m venv yolo_env             # Create a virtual python environment
+source yolo_env/bin/activate         # Activate the environment
+pip install -r requirements.txt     # Install the yolov5 prerequisites
+pip install wheel
+pip install onnx
+
+# Download the PyTorch weights
+mkdir -p /root/yolov5/model_convert && cd /root/yolov5/model_convert
+wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt
+
+cd /root/yolov5
+python3 export.py --weights model_convert/yolov5n.pt --include onnx
+
+
+#2. Convert the ONNX file to IR files
+cd /root/yolov5/
+python3 -m venv ov_env                    # Create an OpenVINO virtual environment
+source ov_env/bin/activate                # Activate the environment
+python -m pip install --upgrade pip       # Upgrade pip
+pip install openvino[onnx]==2022.3.0      # Install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0  # Install the OpenVINO Dev Tool for ONNX
+
+
+cd /root/yolov5/model_convert
+mo --input_model yolov5n.onnx
+
+
+mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+sudo cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh
new file mode 100755
index 00000000..a3879291
--- /dev/null
+++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov8_model_download.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Install the ultralytics package, including all requirements, in a Python>=3.7 environment with PyTorch>=1.7.
+# (The virtual environment is created and activated first, so ultralytics is installed into it.)
+mkdir -p yolov8 && cd yolov8
+apt install -y python3.8-venv
+python3 -m venv openvino_env
+source openvino_env/bin/activate
+pip install ultralytics
+
+
+# Export the official YOLOv8n models to the OpenVINO format.
+yolo export model=yolov8n.pt format=openvino
+yolo export model=yolov8n-seg.pt format=openvino
+
+
+# Move them to the recommended model path
+mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n-seg
+
+cp yolov8n_openvino_model/* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+cp yolov8n-seg_openvino_model/* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n-seg
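The two scripts above leave converted IR files under `/opt/openvino_toolkit/models/convert/public/`. A quick way to sanity-check that a converted model actually loads, using the same OpenVINO 2022.3 runtime the scripts install — a minimal sketch, not part of the patch; the model path matches the yolov5 script above:

```python
# Minimal smoke test for a converted IR file (illustrative; not part of this patch).
from openvino.runtime import Core

core = Core()
model = core.read_model("/opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml")
compiled = core.compile_model(model, "CPU")
# A loadable, compilable model with one image input indicates the conversion succeeded.
print([inp.get_any_name() for inp in compiled.inputs])
```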
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..aa8a2aff
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,65 @@
+---
+BasedOnStyle: Google
+AccessModifierOffset: -2
+ConstructorInitializerIndentWidth: 2
+AlignEscapedNewlinesLeft: false
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AlwaysBreakTemplateDeclarations: true
+AlwaysBreakBeforeMultilineStrings: true
+BreakBeforeBinaryOperators: false
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: true
+BinPackParameters: true
+ColumnLimit: 120
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+DerivePointerBinding: false
+PointerBindsToType: true
+ExperimentalAutoDetectBinPacking: false
+IndentCaseLabels: true
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 60
+PenaltyBreakString: 1
+PenaltyBreakFirstLessLess: 1000
+PenaltyExcessCharacter: 1000
+PenaltyReturnTypeOnItsOwnLine: 90
+SpacesBeforeTrailingComments: 2
+Cpp11BracedListStyle: false
+Standard: Auto
+IndentWidth: 2
+TabWidth: 2
+UseTab: Never
+IndentFunctionDeclarationAfterType: false
+SpacesInParentheses: false
+SpacesInAngles: false
+SpaceInEmptyParentheses: false
+SpacesInCStyleCastParentheses: false
+SpaceAfterControlStatementKeyword: true
+SpaceBeforeAssignmentOperators: true
+ContinuationIndentWidth: 4
+SortIncludes: false
+SpaceAfterCStyleCast: false
+
+# Configure each individual brace in BraceWrapping
+BreakBeforeBraces: Custom
+
+# Control of individual brace wrapping cases
+# (commas added so the flow mapping parses as valid YAML)
+BraceWrapping: {
+  AfterClass: 'true',
+  AfterControlStatement: 'false',
+  AfterEnum: 'true',
+  AfterFunction: 'true',
+  AfterNamespace: 'true',
+  AfterStruct: 'true',
+  AfterUnion: 'true',
+  BeforeCatch: 'false',
+  BeforeElse: 'false',
+  IndentBraces: 'false'
+}
+...
diff --git a/.github/workflows/basic_func_tests.yml b/.github/workflows/basic_func_tests.yml
new file mode 100644
index 00000000..3973960c
--- /dev/null
+++ b/.github/workflows/basic_func_tests.yml
@@ -0,0 +1,52 @@
+# This is a basic workflow to help you get started with Actions
+
+name: Basic_Func_CI
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events, but only for the "master" and "ros2" branches
+  push:
+    branches: [ "master", "ros2" ]
+  pull_request:
+    branches: [ "master", "ros2" ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Set default top-level permissions; no write permission is granted at top level.
+permissions: read-all
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # Remove the old artifacts
+  remove-old-artifacts:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - name: Remove old artifacts
+        uses: c-hive/gha-remove-artifacts@v1
+        with:
+          age: '15 days'
+  # This workflow contains a single job called "build"
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-20.04
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v3
+      # Runs a set of commands using the runner's shell
+      - name: ros2_openvino_toolkit_test
+        run: |
+          df -h
+          sudo docker rmi $(docker image ls -aq) || true
+          sudo swapoff /swapfile || true
+          sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc || true
+          mkdir -p ../workspace
+          cp -r ${GITHUB_WORKSPACE}/.ci_local_test/ros2_openvino_toolkit_test ../workspace
+          cp -r ${GITHUB_WORKSPACE} ../workspace/ros2_openvino_toolkit_test
+          ls ${GITHUB_WORKSPACE}/docker/Dockerfile
+          cp ${GITHUB_WORKSPACE}/docker/Dockerfile ../workspace/ros2_openvino_toolkit_test
+          ls ../workspace/ros2_openvino_toolkit_test/Dockerfile
+          cd ../workspace/ros2_openvino_toolkit_test && ./docker_run.sh
diff --git a/.github/workflows/code_format.yml b/.github/workflows/code_format.yml
new file mode 100644
index 00000000..b0fb096d
--- /dev/null
+++ b/.github/workflows/code_format.yml
@@ -0,0 +1,43 @@
+
+name: Code_Format_Check
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events, but only for the "master" and "ros2" branches
+  push:
+    branches: [ "master", "ros2" ]
+  pull_request:
+    branches: [ "master", "ros2" ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Set default top-level permissions; no write permission is granted at top level.
+permissions: read-all
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # Remove the old artifacts
+  remove-old-artifacts:
+    runs-on: ubuntu-22.04
+    timeout-minutes: 10
+    steps:
+      - name: Remove old artifacts
+        uses: c-hive/gha-remove-artifacts@v1
+        with:
+          age: '15 days'
+  pre-commit:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-22.04
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v3
+      # Runs a set of commands using the runner's shell
+      - name: code_format_check
+        run: |
+          sudo apt-get install clang-format -y
+          find . -name '*.h' -or -name '*.hpp' -or -name '*.cpp' | xargs clang-format -i -style=file
+          git diff --exit-code
diff --git a/.github/workflows/dev-ov_2020-3.yml b/.github/workflows/dev-ov_2020-3.yml
index b6c82595..6f946fda 100644
--- a/.github/workflows/dev-ov_2020-3.yml
+++ b/.github/workflows/dev-ov_2020-3.yml
@@ -8,6 +8,9 @@ on:
   pull_request:
     branches: [ dev-ov.2020.3 ]
 
+# Set default top-level permissions; no write permission is granted at top level.
+permissions: read-all
+
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   # This workflow contains a single job called "build"
diff --git a/.github/workflows/dev-ov_2021-3.yml b/.github/workflows/dev-ov_2021-3.yml
index b7a708fc..145ad66d 100644
--- a/.github/workflows/dev-ov_2021-3.yml
+++ b/.github/workflows/dev-ov_2021-3.yml
@@ -8,6 +8,9 @@ on:
   pull_request:
     branches: [ dev-ov.2021.3 ]
 
+# Set default top-level permissions; no write permission is granted at top level.
+permissions: read-all
+
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   # This workflow contains a single job called "build"
diff --git a/README.md b/README.md
index 26f481e0..869fa51c 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,280 @@
 # ros2_openvino_toolkit
-ROS2 Version supported:
+# Table of Contents
+* [➤ Overview](#overview)
+  * [ROS2 Version Supported](#ros2-version-supported)
+  * [Inference Features Supported](#inference-features-supported)
+* [➤ Prerequisite](#prerequisite)
+* [➤ Introduction](#introduction)
+  * [Design Architecture](#design-architecture)
+  * [Logic Flow](#logic-flow)
+* [➤ Supported Features](#supported-features)
+  * [Multiple Input Components](#multiple-input-components)
+  * [Inference Implementations](#inference-implementations)
+  * [ROS Interfaces and Outputs](#ros-interfaces-and-outputs)
+  * [Demo Result Snapshots](#demo-result-snapshots)
+* [➤ Installation & Launching](#installation-and-launching)
+  * [Deploy in Local Environment](#deploy-in-local-environment)
+  * [Deploy in Docker](#deploy-in-docker)
+* [➤ Reference](#reference)
+* [➤ FAQ](#faq)
+* [➤ Feedback](#feedback)
+* [➤ More Information](#more-information)
 
-* [x] ROS2 Dashing
-* [x] ROS2 Eloquent
-* [x] ROS2 Foxy
-* [x] ROS2 Galactic
+# Overview
+## ROS2 Version Supported
 
-Inference Features supported:
+|Branch Name|ROS2 Version Supported|OpenVINO Version|OS Version|
+|-----------------------|-----------------------|--------------------------------|----------------------|
+|[ros2](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Galactic, Foxy, Humble|V2022.1, V2022.2, V2022.3|Ubuntu 20.04, Ubuntu 22.04|
+|[dashing](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Dashing|V2022.1, V2022.2, V2022.3|Ubuntu 18.04|
+|[foxy-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Foxy|V2021.4|Ubuntu 20.04|
+|[galactic-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Galactic|V2021.4|Ubuntu 20.04|
 
+## Inference Features Supported
 * [x] Object Detection
 * [x] Face Detection
-* [x] Age-Gender Recognition
+* [x] Age Gender Recognition
 * [x] Emotion Recognition
 * [x] Head Pose Estimation
-* [x] Object Segmentation
+* [x] Object Segmentation (Semantic & Instance)
 * [x] Person Re-Identification
 * [x] Vehicle Attribute Detection
 * [x] Vehicle License Plate Detection
 
-## Introduction
+# Prerequisite
 
-The OpenVINO™ (Open visual inference and neural network optimization) toolkit provides a ROS-adaptered runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance.
+|Prerequisite|Mandatory?|Description|
+|-----------------------|-----------------------|--------------------------------|
+|**Processor**|Mandatory|A platform with an Intel processor. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of supported Intel processors.)|
+|**OS**|Mandatory|We have only tested this project under Ubuntu distros. It is recommended to install the Ubuntu distro corresponding to the ROS distro that you select. **For example: Ubuntu 18.04 for Dashing; Ubuntu 20.04 for Foxy and Galactic; Ubuntu 22.04 for Humble.**|
+|**ROS2**|Mandatory|We support the active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You may find the corresponding branch in the table above, in section [**ROS2 Version Supported**](#ros2-version-supported).|
+|**OpenVINO**|Mandatory|The version of the OpenVINO toolkit is decided by the OS and ROS2 distro you use. See the table above in section [**ROS2 Version Supported**](#ros2-version-supported).|
+|**RealSense Camera**|Optional|A RealSense camera is optional; you may instead choose one of these alternatives as the input: standard camera, ROS image topic, video/image file or RTSP camera.|
+
+# Introduction
+## Design Architecture
+<details>
+<summary>Architecture Design</summary>
+
+From the view of hierarchical architecture design, the package is divided into different functional components, as shown in the picture below.
+
+![OpenVINO_Architecture](./data/images/design_arch.PNG "OpenVINO RunTime Architecture")
+
+<details>
+<summary>Intel® OpenVINO™ toolkit</summary>
+
+- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural networks, which quickly deploys applications and solutions for vision inference. By leveraging the Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance.
+   - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel.
+   - Unleash convolutional neural network (CNN)-based deep learning inference using a common API.
+   - Speed development using optimized OpenCV* and OpenVX* functions. See more [here](https://github.com/openvinotoolkit/openvino) for an introduction to Intel OpenVINO™.
+</details>
+
+<details>
+<summary>ROS OpenVINO Runtime Framework</summary>
+
+- **ROS OpenVINO Runtime Framework** is the main body of this repo. It provides the key logic implementation for pipeline lifecycle management, resource management and the ROS system adapter, which extends the Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to simplify launching, configuration, data analysis and re-use.
+</details>
+
+<details>
+<summary>ROS Input & Output</summary>
+
+- **Diverse input resources** are data resources to be inferred and analyzed with the OpenVINO framework.
+- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic messages and inference results.
+</details>
+
+<details>
+<summary>Optimized Models</summary>
+
+- **Optimized Models** are provided by the Model Optimizer component of the Intel® OpenVINO™ toolkit. It imports trained models from various frameworks (Caffe*, TensorFlow*, MXNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization, and it supports graph freezing and graph summarizing along with dynamic input freezing.
+</details>
+</details>
+
+## Logic Flow
+<details>
+<summary>Logic Flow</summary>
+
+From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The following picture depicts how these entities work together when the corresponding program is launched.
+
+![Logic_Flow](./data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow")
+
+Once a corresponding program is launched with a specified .yaml config file passed in the .launch file or via the command line, the _**parameter manager**_ analyzes the configurations of the pipeline and the whole framework, then shares the parsed configuration information with the pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into the _**pipeline manager**_ for lifecycle control and inference action triggering.
+
+The contents of the **.yaml config file** should be well structured and follow the supported rules and entity names. Please see the [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for how to create or edit the config files.
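As an illustration of the structure the parameter manager expects, below is an abridged pipeline config in the spirit of the shipped sample param files (the field values are illustrative and this is not a complete working file; the files under `./sample/param` are authoritative):

```yaml
Pipelines:
- name: people
  inputs: [StandardCamera]
  infers:
    - name: FaceDetection
      model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml
      engine: CPU
      label: to/be/set/xxx.labels
      batch: 1
  outputs: [ImageWindow, RosTopic, RViz]
  connects:
    - left: StandardCamera
      right: [FaceDetection]
    - left: FaceDetection
      right: [ImageWindow, RosTopic, RViz]
Common:
```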
+<details>
+<summary>Pipeline</summary>
+
+**Pipeline** fulfills the whole data handling process: initializing the input component for image data gathering and formatting; building up the structured inference network and passing the formatted data through the inference network; transferring the inference results and handling the output, etc.
+</details>
+
+<details>
+<summary>Pipeline manager</summary>
+
+**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, a system exception, resource limitation, or an end user's operation). Because it co-works with resource management and is aware of the whole framework, it provides performance optimization by sharing system resources between pipelines and reducing the burden of data copies.
+</details>
+</details>
+
+# Supported Features
+## Multiple Input Components
+Currently, the package supports several input resources for acquiring image data, listed in the following table:
+
+<details>
+<summary>Input Resource Table</summary>
+
+|Input Resource|Description|
+|--------------------|------------------------------------------------------------------|
+|StandardCamera|Any RGB camera with a USB port. Currently, only the first USB camera is used if several are connected.|
+|RealSenseCamera|Intel RealSense RGB-D camera, called directly via the librealsense plugin of OpenCV.|
+|ImageTopic|Any ROS topic which is structured as an image message.|
+|Image|Any image file which can be parsed by OpenCV, such as .png, .jpeg.|
+|Video|Any video file which can be parsed by OpenCV.|
+|IpCamera|Any RTSP server which can push a video stream.|
+</details>
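The input resource is selected per pipeline in the .yaml config file. An illustrative fragment — the exact keys follow the yaml configuration guidance linked in this README, and an input path is only needed for file- or stream-based inputs:

```yaml
inputs: [RealSenseCamera]
# or, for an RTSP stream (address is illustrative):
# inputs: [IpCamera]
# input_path: "rtsp://<server-ip>:<port>/<stream-name>"
```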
-## Prerequisite +## Inference Implementations +Currently, the corresponding relation of supported inference features, models used and yaml configurations are listed as follows: -* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2021-4-lts-relnotes.html) for the full list of Intel processors supported.) -* OS: Ubuntu 20.04 -* ROS2: Galactic Geochelone -* OpenVINO: V2021.4, see [the release notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html) for more info. -* [Optional] RealSense D400 Series Camera -* [Optional] Intel NCS2 Stick -## Tables of contents -* [Design Architecture and Logic Flow](./doc/tables_of_contents/Design_Architecture_and_logic_flow.md) -* [Supported Features](./doc/tables_of_contents/supported_features/Supported_features.md) -* Tutorials - - [How to configure a inference pipeline?](./doc/tables_of_contents/tutorials/configuration_file_customization.md) - - [How to create multiple pipelines in a process?](./doc/tables_of_contents/tutorials/Multiple_Pipelines.md) +
+
+<details>
+<summary>Inference Feature Correspondence Table</summary>
+
+|Inference|Description|YAML Configuration|Model Used|
+|-----------------------|------------------------------------------------------------------|----------------------|----------------------|
+|Face Detection|Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)<br>[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)<br>[pipeline_people.yaml](./sample/param/pipeline_people.yaml)<br>[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/face-detection-adas-0001)<br>[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)<br>[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)<br>[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)|
+|Emotion Recognition|Emotion recognition based on the detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)<br>[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)<br>[pipeline_people.yaml](./sample/param/pipeline_people.yaml)<br>[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)|
+|Age & Gender Recognition|Age and gender recognition based on the detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)<br>[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)<br>[pipeline_people.yaml](./sample/param/pipeline_people.yaml)<br>[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)|
+|Head Pose Estimation|Head pose estimation based on the detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)<br>[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)<br>[pipeline_people.yaml](./sample/param/pipeline_people.yaml)<br>[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)|
+|Object Detection|Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)<br>[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mobilenet-ssd)<br>[yolov5](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/111-yolov5-quantization-migration)<br>[yolov7](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/226-yolov7-optimization)<br>[yolov8](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/230-yolov8-optimization)|
+|Vehicle and License Detection|Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-license-plate-detection-barrier-0106)<br>[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-attributes-recognition-barrier-0039)<br>[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/license-plate-recognition-barrier-0001)|
+|Object Segmentation - Semantic|Semantic segmentation: assigns a class label to each pixel in an image.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)<br>[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)<br>[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/semantic-segmentation-adas-0001)<br>[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/deeplabv3)|
+|Object Segmentation - Instance|Instance segmentation: a combination of semantic segmentation & object detection.|[pipeline_segmentation_instance.launch.yaml](./sample/param/pipeline_segmentation_instance.yaml)|[yolov8-seg](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/230-yolov8-optimization)<br>[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)|
+|Person Attributes|Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-attributes-recognition-crossroad-0230)<br>[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)|
+|Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)<br>[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-reidentification-retail-0277)|
+|Object Segmentation Maskrcnn|Object segmentation and detection based on the maskrcnn model. [_Deprecated; it is recommended to try `object segmentation - instance` first._]|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)|
+</details>
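Each pipeline in the table maps to a sample launch file plus the listed param file. A typical invocation, assuming the workspace has been built and sourced per the quick-start guide (launch-file names follow the pattern of the yaml names above; check the installed `share/openvino_node` directory if in doubt):

```bash
ros2 launch openvino_node pipeline_people.launch.py
```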
+
+## ROS Interfaces and Outputs
+The inference results can be output in several types. One or more types can be enabled for any inference pipeline.
+### Topic
+Specific topic(s) can be generated and published according to the given inference functionalities.
+
+<details>
+<summary>Published Topic Correspondence Table</summary>
+
+|Inference|Published Topic|
+|---|---|
+|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
+|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([object_msgs:msg:EmotionsStamped](../../../object_msgs/msg/EmotionsStamped.msg))|
+|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([object_msgs:msg:AgeGenderStamped](../../../object_msgs/msg/AgeGenderStamped.msg))|
+|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([object_msgs:msg:HeadPoseStamped](../../../object_msgs/msg/HeadPoseStamped.msg))|
+|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))|
+|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))|
+|Object Segmentation Maskrcnn|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))|
+|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([object_msgs::msg::ReidentificationStamped](../../../object_msgs/msg/ReidentificationStamped.msg))|
+|Vehicle Detection|```/ros2_openvino_toolkit/detected_vehicles_attribs```([object_msgs::msg::VehicleAttribsStamped](../../../object_msgs/msg/PersonAttributeStamped.msg))|
+|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([object_msgs::msg::LicensePlateStamped](../../../object_msgs/msg/LicensePlateStamped.msg))|
+</details>
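Once a pipeline is running, any of the topics above can be inspected from a second shell, e.g.:

```bash
ros2 topic echo /ros2_openvino_toolkit/detected_objects
```

Note that the segmentation topic name is spelled `segmented_obejcts`, exactly as published by the current code.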
+
+### Service
+Several ROS2 services are created, expected to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing an inference pipeline's lifecycle.
+
+<details>
+<summary>Service Correspondence Table</summary>
+
+|Inference|Service|
+|---|---|
+|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
+|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))|
+|Age Gender Detection Service|```/detect_age_gender```([object_msgs::srv::AgeGender](./object_msgs/srv/AgeGenderSrv.srv))|
+|Headpose Detection Service|```/detect_head_pose```([object_msgs::srv::HeadPose](./object_msgs/srv/HeadPoseSrv.srv))|
+|Emotion Detection Service|```/detect_emotion```([object_msgs::srv::Emotion](./object_msgs/srv/EmotionSrv.srv))|
+</details>
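The exact request/response layout should be taken from the installed interface definition; for example (the request field in the call below is an assumption for illustration only):

```bash
ros2 interface show object_msgs/srv/DetectObject
# then, with the field name adjusted to what the interface actually shows:
ros2 service call /detect_object object_msgs/srv/DetectObject "{image_path: '/root/jpg/car.jpg'}"
```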
+
+### RViz
+RViz display is also supported, via a composited topic of the original image frame with the inference result.
+To show it in the RViz tool, add an image marker with the composited topic:
+```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html))
+
+### Image Window
+An OpenCV-based image window is natively supported by the package.
+To enable the window, the Image Window output should be added to the output choices in the .yaml config file. Refer to [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for more information about checking/adding this feature in your launch configuration.
+
+## Demo Result Snapshots
+<details>
+<summary>Demo Snapshots</summary>
+
+For snapshots of the demo results, refer to the following pictures.
+
+* Face detection input from a standard camera
+![face_detection_demo_image](./data/images/face_detection.png "face detection demo image")
+
+* Object detection input from a RealSense camera
+![object_detection_demo_realsense](./data/images/object_detection.gif "object detection demo realsense")
+
+* Object segmentation input from a video
+![object_segmentation_demo_video](./data/images/object_segmentation.gif "object segmentation demo video")
+
+* Person reidentification input from a standard camera
+![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video")
+</details>
+
+# Installation and Launching
+## Deploy in Local Environment
+* Refer to the quick-start document [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & launching instructions.
+* Refer to the quick-start document [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+
+## Deploy in Docker
+* Refer to the docker instructions [docker_instructions](./docker/docker_instructions_ov2.0.md) for detailed information about building the docker image and launching.
+* Refer to the quick-start document [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+
+# Reference
+* Open Model Zoo: Refer to the OpenVINO document [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3) for detailed model structures and demo samples.
+* OpenVINO API 2.0: Refer to the OpenVINO document [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for the latest API 2.0 transition guide.
+
+# FAQ
+* How to get the IR file for [yolov5](./doc/quick_start/tutorial_for_yolov5_converted.md) | [yolov7](./doc/quick_start/tutorial_for_yolov7_converted.md) | [yolov8](./doc/quick_start/tutorial_for_yolov8_converted.md)?
+* [How to build OpenVINO from source?](https://github.com/openvinotoolkit/openvino/wiki#how-to-build)
+* [How to build RealSense from source?](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)
+* [What are the basic commands of the Docker CLI?](https://docs.docker.com/engine/reference/commandline/docker/)
+* [What is the canonical C++ API for interacting with ROS?](https://docs.ros2.org/latest/api/rclcpp/)
+<details>
+<summary>How to change the logging level?</summary>
+
+  This project provides two logging levels: *DEBUG* & *INFO*.
+  You may follow these steps to change the logging level:
+
+  - Update ./openvino_wrapper_lib/CMakeLists.txt by uncommenting (for DEBUG level) or commenting out (for INFO level) this line:
+  ```code
+  #add_definitions(-DLOG_LEVEL_DEBUG)
+  ```
+  - Rebuild the project.
+  Refer to the corresponding quick-start documents to rebuild this project. e.g.:
+  ```code
+  source /opt/ros/<ROS_DISTRO>/setup.bash
+  colcon build --symlink-install
+  ```
+  - Launch the OpenVINO node.
+  You will see that the logging level has changed.
+</details>
+
+# Feedback
+* Report questions, issues and suggestions using the [issue](https://github.com/intel/ros2_openvino_toolkit/issues) tracker.
 
 # More Information
-* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
 
 ###### *Any security issue should be reported using process at https://01.org/security*
+
diff --git a/data/labels/object_detection/coco.names b/data/labels/object_detection/coco.names
new file mode 100755
index 00000000..16315f2b
--- /dev/null
+++ b/data/labels/object_detection/coco.names
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorbike
+aeroplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+sofa
+pottedplant
+bed
+diningtable
+toilet
+tvmonitor
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
\ No newline at end of file
diff --git a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
index 23d4cd9a..827dc158 100644
--- a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
+++ b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels
@@ -1,2 +1,3 @@
+background
 vehicle
 license
diff --git a/data/labels/object_segmentation/frozen_inference_graph.labels b/data/labels/object_segmentation/frozen_inference_graph.labels
index b4427edc..744de27d 100644
--- a/data/labels/object_segmentation/frozen_inference_graph.labels
+++ b/data/labels/object_segmentation/frozen_inference_graph.labels
@@ -1,3 +1,4 @@
+_background
 person
 bicycle
 car
@@ -87,4 +88,4 @@ vase
 scissors
 teddy_bear
 hair_drier
-toothbrush
\ No newline at end of file
+toothbrush
diff --git a/data/model_list/convert_model.lst b/data/model_list/convert_model.lst
new file mode 100644
index 00000000..0cfc7f5b
--- /dev/null
+++ b/data/model_list/convert_model.lst
@@ -0,0 +1,5 @@
+# This file can be used with the --list option of the model converter.
+mobilenet-ssd
+deeplabv3
+mask_rcnn_inception_resnet_v2_atrous_coco
+
diff --git a/data/model_list/download_model.lst b/data/model_list/download_model.lst
new file mode 100644
index 00000000..0744a846
--- /dev/null
+++ b/data/model_list/download_model.lst
@@ -0,0 +1,18 @@
+# This file can be used with the --list option of the model downloader.
+face-detection-adas-0001
+age-gender-recognition-retail-0013
+emotions-recognition-retail-0003
+landmarks-regression-retail-0009
+license-plate-recognition-barrier-0001
+person-detection-retail-0013
+person-attributes-recognition-crossroad-0230
+person-reidentification-retail-0277
+vehicle-attributes-recognition-barrier-0039
+vehicle-license-plate-detection-barrier-0106
+head-pose-estimation-adas-0001
+human-pose-estimation-0001
+semantic-segmentation-adas-0001
+mobilenet-ssd
+deeplabv3
+mask_rcnn_inception_resnet_v2_atrous_coco
+
diff --git a/doc/design/Pipeline_service.png b/doc/design/Pipeline_service.png
new file mode 100644
index 00000000..b5907d9d
Binary files /dev/null and b/doc/design/Pipeline_service.png differ
diff --git a/doc/design/arch_design-configurable_pipeline_management.PNG b/doc/design/arch_design-configurable_pipeline_management.PNG
new file mode 100644
index 00000000..2f076d68
Binary files /dev/null and b/doc/design/arch_design-configurable_pipeline_management.PNG differ
diff --git a/doc/design/arch_design-decoupling.PNG b/doc/design/arch_design-decoupling.PNG
new file mode 100644
index 00000000..9e31e9c5
Binary files /dev/null and b/doc/design/arch_design-decoupling.PNG differ
diff --git a/doc/design/arch_design-hierarchical_components.PNG b/doc/design/arch_design-hierarchical_components.PNG
new file mode 100644
index 00000000..f36cda9c
Binary files /dev/null and b/doc/design/arch_design-hierarchical_components.PNG differ
diff --git a/doc/design/arch_design-pipeline_composition.PNG b/doc/design/arch_design-pipeline_composition.PNG
new file mode 100644
index 00000000..2cfc7ae1
Binary files /dev/null and b/doc/design/arch_design-pipeline_composition.PNG differ
diff --git a/doc/design/config_example-result_filtering_for_vehicle_analytics.png b/doc/design/config_example-result_filtering_for_vehicle_analytics.png
new file mode 100644
index 00000000..a08fa33d
Binary files /dev/null and b/doc/design/config_example-result_filtering_for_vehicle_analytics.png differ
diff --git a/doc/design/config_example-vehicle_analytics.png b/doc/design/config_example-vehicle_analytics.png
new file mode 100644
index 00000000..1b89aa44
Binary files /dev/null and b/doc/design/config_example-vehicle_analytics.png differ
diff --git a/doc/design/data_filtering_for_inference_results.png b/doc/design/data_filtering_for_inference_results.png
new file mode 100644
index 00000000..67e0bc56
Binary files /dev/null and b/doc/design/data_filtering_for_inference_results.png differ
diff --git a/doc/design/filtering_example-vehicle_analytics.png b/doc/design/filtering_example-vehicle_analytics.png
new file mode 100644
index 00000000..a73bcc89
Binary files /dev/null and b/doc/design/filtering_example-vehicle_analytics.png differ
diff --git a/doc/design/inference_example-vehicle_analytics_pipeline.png b/doc/design/inference_example-vehicle_analytics_pipeline.png
new file mode 100644
index 00000000..ea2decc1
Binary files /dev/null and b/doc/design/inference_example-vehicle_analytics_pipeline.png differ
diff --git a/doc/design/inference_examples.png b/doc/design/inference_examples.png
new file mode 100644
index 00000000..6e8a856b
Binary files /dev/null and b/doc/design/inference_examples.png differ
diff --git a/doc/inferences/Face_Detection.md b/doc/inferences/Face_Detection.md
deleted file mode 100644
index 3bd2c8fa..00000000
--- a/doc/inferences/Face_Detection.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Face Detection
-
-## Demo Result Snapshots
-See below pictures for the demo result snapshots.
-* face detection input from image -![face_detection_demo_image](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/face_detection.png "face detection demo image") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output - sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output - sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output - sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - ``` diff --git a/doc/inferences/Face_Reidentification.md b/doc/inferences/Face_Reidentification.md deleted file mode 100644 index 9a496fff..00000000 --- a/doc/inferences/Face_Reidentification.md +++ /dev/null @@ -1,10 +0,0 @@ -# Face Reidentification -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output - sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir /opt/openvino_toolkit/models/face-reidentification/output - ``` - - diff --git a/doc/inferences/Object_Detection.md b/doc/inferences/Object_Detection.md deleted file mode 100644 index 905b134d..00000000 --- a/doc/inferences/Object_Detection.md +++ /dev/null @@ -1,91 +0,0 @@ -# Object Detection -## Introduction -The section depict the kind of Object Detection, which produces object classification and its location based ROI. -Two kinds of models are supported currently: -- SSD based Object Detection Models - * SSD300-VGG16, SSD500-VGG16, Mobilenet-SSD (both caffe and tensorflow) -- YoloV2 - -## Demo Result Snapshots -* object detection input from realsense camera - -![object_detection_demo_realsense](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_detection.gif "object detection demo realsense") - -## Download Models ->> Before using the supported models, you need to first downloand and optimize them into OpenVINO mode. mobilenet-SSD caffe model is the default one used in the Object Detection configuration. - -#### mobilenet-ssd -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - cd $model_downloader - sudo python3 ./downloader.py --name mobilenet-ssd - #FP32 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - #FP16 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 - ``` -#### YOLOv2-voc -* Darkflow to protobuf(.pb) - - install [darkflow](https://github.com/thtrieu/darkflow) - - install prerequsites - ```bash - pip3 install tensorflow opencv-python numpy networkx cython - ``` - - Get darkflow and YOLO-OpenVINO - ```bash - mkdir -p ~/code && cd ~/code - git clone https://github.com/thtrieu/darkflow - git clone https://github.com/chaoli2/YOLO-OpenVINO - sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/ - ``` - - modify the line self.offset = 16 in the ./darkflow/utils/loader.py file and replace with self.offset = 20 - - Install darkflow - ```bash - cd ~/code/darkflow - pip3 install . - ``` - - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow. - ```bash - cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt - ``` - - Get yolov2 weights and cfg - ```bash - cd ~/code/darkflow - mkdir -p models - cd models - wget -c https://pjreddie.com/media/files/yolov2-voc.weights - wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg - ``` - - Run convert script - ```bash - cd ~/code/darkflow - flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb - ``` -* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model - ```bash - cd ~/code/darkflow - # FP32 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP32 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - # FP16 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP16 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` diff --git a/doc/inferences/Object_Segmentation.md b/doc/inferences/Object_Segmentation.md deleted file mode 100644 index 7e998af9..00000000 --- a/doc/inferences/Object_Segmentation.md +++ /dev/null @@ -1,24 +0,0 @@ -# Object Segmentation -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* object segmentation input from video -![object_segmentation_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_segmentation.gif "object segmentation demo video") -## Download Models -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - #object segmentation model - mkdir -p ~/Downloads/models - cd ~/Downloads/models - wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - cd mask_rcnn_inception_v2_coco_2018_01_28 - #FP32 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir /opt/openvino_toolkit/models/segmentation/output/FP32 - #FP16 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32 - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` diff --git a/doc/inferences/People_Reidentification.md b/doc/inferences/People_Reidentification.md deleted file mode 100644 index 39c276d6..00000000 --- a/doc/inferences/People_Reidentification.md +++ /dev/null @@ -1,13 +0,0 @@ -# People Reidentification -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* Person Reidentification input from standard camera -![person_reidentification_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/person-reidentification.gif "person reidentification demo video") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output - sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output - ``` - diff --git a/doc/inferences/Vehicle_Detection.md b/doc/inferences/Vehicle_Detection.md deleted file mode 100644 index 8fdb1a5b..00000000 --- a/doc/inferences/Vehicle_Detection.md +++ /dev/null @@ -1,14 +0,0 @@ -# Vehicle Detection -## Download Models -### OpenSource Version -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output - sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - ``` diff --git a/doc/installation/BINARY_INSTALLATION.md b/doc/installation/BINARY_INSTALLATION.md deleted file mode 100644 index ebe1cf71..00000000 --- a/doc/installation/BINARY_INSTALLATION.md +++ /dev/null @@ -1,74 +0,0 @@ -# ros2_openvino_toolkit -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) -- [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. -```bash -./environment_setup_binary.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install [OpenVINO™ Toolkit 2019R3.1](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) ([download](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux))
- **Note**: Please use *root privileges* to run the installer when installing the core components. -* Install [the Intel® Graphics Compute Runtime for OpenCL™ driver components required to use the GPU plugin](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps) - -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation -* Build sample code under openvino toolkit - ```bash - # root is required instead of sudo - source /opt/intel/openvino/bin/setupvars.sh - cd /opt/intel/openvino/deployment_tools/inference_engine/samples/ - mkdir build - cd build - cmake .. - make - ``` -* set ENV CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export CPU_EXTENSION_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - source /opt/intel/openvino/bin/setupvars.sh - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - diff --git a/doc/installation/OPEN_SOURCE_INSTALLATION.md b/doc/installation/OPEN_SOURCE_INSTALLATION.md deleted file mode 100644 index cba2ce0c..00000000 --- a/doc/installation/OPEN_SOURCE_INSTALLATION.md +++ /dev/null @@ -1,82 +0,0 @@ -# ros2_openvino_toolkit - -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) - -- OpenVINO™ Toolkit Open Source
- * The [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino) that helps to enable fast, heterogeneous deep learning inferencing for Intel® processors (CPU and GPU/Intel® Processor Graphics), and supports more than 100 public and custom models.
- * [Open Model Zoo](https://github.com/opencv/open_model_zoo) includes 20+ pre-trained deep learning models to expedite development and improve deep learning inference on Intel® processors (CPU, Intel Processor Graphics, FPGA, VPU), along with many samples to easily get started. - -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. -```bash -./environment_setup.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install OpenVINO™ Toolkit Open Source
- * Install OpenCL Driver for GPU
- ```bash - cd ~/Downloads - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-gmmlib_18.4.1_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-core_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-opencl_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-opencl_19.04.12237_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-ocloc_19.04.12237_amd64.deb - sudo dpkg -i *.deb - ``` - * Install [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino)([tag 2019_R3.1](https://github.com/openvinotoolkit/openvino/blob/2019_R3.1/inference-engine/README.md))
- * Install [Open Model Zoo](https://github.com/opencv/open_model_zoo)([tag 2019_R3.1](https://github.com/opencv/open_model_zoo/blob/2019_R3.1/demos/README.md))
- -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation - -* set ENV InferenceEngine_DIR, CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export InferenceEngine_DIR=/opt/openvino_toolkit/dldt/inference-engine/build/ - export CPU_EXTENSION_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - - - diff --git a/doc/installation/installation.md b/doc/installation/installation.md deleted file mode 100644 index 6596a35a..00000000 --- a/doc/installation/installation.md +++ /dev/null @@ -1,11 +0,0 @@ - -# Installation ->> Intel releases 2 different series of OpenVINO Toolkit, we call them as [OpenSource Version](https://github.com/openvinotoolkit/openvino/) and [Binary Version](https://software.intel.com/en-us/openvino-toolkit). You may choose any of them to install. - -**NOTE:** If you are not sure which version you would use, it is recommended for you to choose [Binary Version](https://software.intel.com/en-us/openvino-toolkit), which can simplify your environment setup. - -## OpenSource Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/OPEN_SOURCE_INSTALLATION.md) for details. - -## Binary Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/BINARY_INSTALLATION.md) for details. diff --git a/doc/launching/launch.md b/doc/launching/launch.md deleted file mode 100644 index efc1d1ae..00000000 --- a/doc/launching/launch.md +++ /dev/null @@ -1,37 +0,0 @@ -# Launching -## 1. Setup Environment -Please refer to this [guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/set_environment.md) for details. - -**NOTE:** Configure *once* the Neural Compute Stick USB Driver by following between instructions, in case you have a NCS or NCS2 in hand. - ```bash - cd ~/Downloads - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` -## 2. 
Launch Program -### Topic -Each inference listed in [section Inference Implementations](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations) is created default launching configurations( xxx.launch.py) in OpenVINO Sample package. You can follow the utility of ROS2 launch instruction to launch them. For example: - ```bash - ros2 launch dynamic_vino_sample pipeline_object.launch.py - ``` - -The full list of xxx.launch.py is shown in below tabel: - -|Download Models|Launch File|Description| -|---|---|---| -|[Object Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md)|pipeline_object.launch.py|Launching file for **Object Detection**, by default mobilenet_ssd model and standard USB camera are used.| -|[Face Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md)|pipeline_people.launch.py|Launching file for **Face Detection**, also including **Age/Gender Recognition, HeadPose Estimation, and Emotion Recognition**.| -|[Object Segmentation](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Segmentation.md)|pipeline_segmentation.launch.py|Launching file for **Object Segmentation**.| -|[Person Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/People_Reidentification.md)|pipeline_person_reid.launch.py|Launching file for **Person Re-Identification**.| -|[Face Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Reidentification.md)|pipeline_face_reid.launch.py|Launching file for **Face Segmentation**, in which **Face Landmark Detection** is included.| -|[Vehicle Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Vehicle_Detection.md)|pipeline_vehicle_detection.launch.py|Launching file for **vehicle detection**, in which **license plate recognition** is included.| - -### Service -See [service Page](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/service.md) for detailed launching instructions. diff --git a/doc/launching/service.md b/doc/launching/service.md deleted file mode 100644 index c5f5701f..00000000 --- a/doc/launching/service.md +++ /dev/null @@ -1,27 +0,0 @@ -# Service -## Download Models -### Object Detection Service -* See [object detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md#mobilenet-ssd) section for detailed instructions. - -### People Detection Service -* See [People Detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md#opensource-version) section for detaild instructions. 
- -## Launching -* run object detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_object_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_object_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/car.png - ``` -* run face detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_people_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_people_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg - ``` diff --git a/doc/launching/set_environment.md b/doc/launching/set_environment.md deleted file mode 100644 index d50006a3..00000000 --- a/doc/launching/set_environment.md +++ /dev/null @@ -1,32 +0,0 @@ -# Set Environment -## OpenSource Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib - export openvino_version=opensource - ``` -* Install prerequisites - ```bash - cd /opt/openvino_toolkit/dldt/model-optimizer/install_prerequisites - sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` -## Binary Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - source /opt/intel/openvino/bin/setupvars.sh - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib - export openvino_version=binary - ``` -* Install prerequisites - ```bash - cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites - sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` diff --git a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md deleted file mode 100644 index 0f43cc9f..00000000 --- a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md +++ /dev/null @@ -1,136 +0,0 @@ -# ROS2_FOXY_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. Environment Setup -* Install ROS2 Foxy ([guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide]https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. Building and Installation -* Install ROS2_OpenVINO packages -``` -mkdir -p ~/catkin_ws/src -cd ~/catkin_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b foxy_dev -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2 -git clone https://github.com/ros-perception/vision_opencv.git -b ros2 -``` -* Install dependencies -``` -sudo apt-get install ros-foxy-diagnostic-updater -``` -* Build package -``` -source /opt/ros/foxy/setup.bash -source /opt/intel/openvino_2021/bin/setupvars.sh -cd ~/catkin_ws -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. 
Running the Demo -* Preparation -``` -source /opt/intel/openvino_2021/bin/setupvars.sh -sudo mkdir -p /opt/openvino_toolkit -sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models -sudo chmod 777 -R /opt/openvino_toolkit/models -``` - -* See all available models -``` -cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output -sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output -sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output -sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output -sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output -sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output -sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output -sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output -sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output -sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output -sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output -``` - -* copy label files (execute once) -* Before launch, copy label files to the same model path, make sure the model path and label path match the ros_openvino_toolkit/vino_launch/param/xxxx.yaml. 
-``` - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 -``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) -* (Note: Tensorflow=1.15.5, Python<=3.7) - * ssd_mobilenet_v2_coco - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * deeplabv3 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name deeplabv3 - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * YOLOV2 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name yolo-v2-tf - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - * run face detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_people.launch.py - ``` - * run person reidentification sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py - ``` - * run person face reidentification sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py - ``` - * run face detection sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_image.launch.py - ``` - * run object segmentation sample code input from RealSenseCameraTopic. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py - ``` - * run object segmentation sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py - ``` - * run vehicle detection sample code input from StandardCamera. 
- ``` - ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py - ``` - * run person attributes sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py - ``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* - diff --git a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md deleted file mode 100644 index a5125268..00000000 --- a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md +++ /dev/null @@ -1,156 +0,0 @@ -# ROS2_GALACTIC_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. Environment Setup -* Install ROS2 Galactic ([guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) or building by source code ([guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingForLinux)) - - * version **intel-openvino-dev-ubuntu20-2021.4.752** was tested. It is recommend to use 2021.4.752 or the newer. -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. Building and Installation -* Install ROS2_OpenVINO_Toolkit packages -``` -mkdir -p ~/catkin_ws/src -cd ~/catkin_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b galactic -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2 -git clone https://github.com/ros-perception/vision_opencv.git -b ros2 -``` -* Install dependencies -``` -sudo apt-get install ros-galactic-diagnostic-updater -sudo pip3 install networkx -sudo apt-get install python3-defusedxml -sudo pip3 install tensorflow==2.4.1 -``` -* Build package -``` -source /opt/ros/galactic/setup.bash -source /opt/intel/openvino_2021/bin/setupvars.sh -cd ~/catkin_ws -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. 
Running the Demo -* Preparation -``` -source /opt/intel/openvino_2021/bin/setupvars.sh -sudo mkdir -p /opt/openvino_toolkit -sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models -sudo chmod 777 -R /opt/openvino_toolkit/models -``` - -* See all available models -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output -sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output -sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output -sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output -sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output -sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output -sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output -sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output -sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output -sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output -sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output -``` - -* copy label files (execute once) -* Before launch, copy label files to the same model path, make sure the model path and label path match the ros_openvino_toolkit/vino_launch/param/xxxx.yaml. 
-``` - # Lables for Face-Detection - sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - - # Lables for Emotions-Recognition - sudo mkdir -p /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - - # Labels for Sementic-Segmentation - sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - - # Labels for Vehicle-License_Plate - sudo mkdir -p /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 -``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) -* (Note: Tensorflow=2.4.1, Python<=3.7) - * ssd_mobilenet_v2_coco - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * deeplabv3 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name deeplabv3 - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py -d /opt/openvino_toolkit/models/ - ``` - * YOLOV2 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name yolo-v2-tf - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - * run face detection sample code input from StandardCamera. 
- ``` - ros2 launch dynamic_vino_sample pipeline_people.launch.py - ``` - * run person reidentification sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py - ``` - * run person face reidentification sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py - ``` - * run face detection sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_image.launch.py - ``` - * run object segmentation sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py - ``` - * run object segmentation sample code input from Image. - ``` - sudo mkdir -p /opt/openvino_toolkit/ros2_openvino_toolkit/data/images - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/images/expressway.jpg /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/ - ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py - ``` - * run vehicle detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py - ``` - * run person attributes sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py - ``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* - diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md new file mode 100644 index 00000000..45f79670 --- /dev/null +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -0,0 +1,135 @@ +# ROS2_OpenVINO_Toolkit + +**NOTE:** +Below steps have been tested on **Ubuntu 20.04** and **Ubuntu 22.04**. +Supported ROS2 versions include foxy,galactic and humble. + +## 1. Environment Setup +For ROS2 foxy and galactic on ubuntu 20.04: + * Install ROS2.
+ Refer to: [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) + + * Install Intel® OpenVINO™ Toolkit Version: 2022.3.
+ Refer to: [OpenVINO_install_guide](https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html#doxid-openvino-docs-install-guides-installing-openvino-apt)
+ * Install from an archive file. Both the runtime and the development tools are needed; `pip` is recommended for installing the development tools.
+ Refer to: [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) + + * Install Intel® RealSense™ SDK.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) + +For ROS2 humble on ubuntu 22.04: + * Install ROS2.
+ Refer to: [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) + + * Install Intel® OpenVINO™ Toolkit Latest Version by Source.
+ Refer to: [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) + + * Install Intel® RealSense™ SDK by Source.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)
+
+## 2. Building and Installation
+**Note**: Replace `<ROS2_VERSION>` with your ROS2 distribution (foxy, galactic or humble) and `<OpenVINO_INSTALL_DIR>` with the directory where OpenVINO is installed.
+* Install ROS2_OpenVINO_Toolkit packages
+```
+mkdir -p ~/catkin_ws/src
+cd ~/catkin_ws/src
+git clone https://github.com/intel/ros2_openvino_toolkit -b ros2
+git clone https://github.com/intel/ros2_object_msgs
+git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2-development
+git clone https://github.com/ros-perception/vision_opencv.git -b <ROS2_VERSION>
+```
+* Install dependencies
+```
+sudo apt-get install ros-<ROS2_VERSION>-diagnostic-updater
+sudo apt install python3-colcon-common-extensions
+```
+* Build package
+```
+source /opt/ros/<ROS2_VERSION>/setup.bash
+source <OpenVINO_INSTALL_DIR>/setupvars.sh
+cd ~/catkin_ws
+colcon build --symlink-install
+source ./install/local_setup.bash
+```
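+After the build, a quick smoke test (a minimal sketch; it only assumes the workspace overlay was sourced as above) confirms the toolkit packages are visible to ROS2:
+```
+# Should list openvino_node and related packages from the overlay
+ros2 pkg list | grep -i openvino
+```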
+
+## 3. Running the Demo
+### Install OpenVINO 2022.3 by PIP
+If OpenVINO 2022.3 was installed with pip, the Open Model Zoo (OMZ) tools are available for downloading and converting open_model_zoo models.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/)
+
+* See all available models
+```
+omz_downloader --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of the model (execute once), for example:
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If a model (TensorFlow, Caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (for example, the model for object detection):
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
+### Install OpenVINO 2022.3 by source code
+* See all available models
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 downloader.py --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of models (execute once), for example:
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 downloader.py --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If a model (TensorFlow, Caffe, MXNet, ONNX, Kaldi) needs to be converted to Intermediate Representation (for example, the model for object detection):
+```
+cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools
+sudo python3 converter.py --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
+
+* Copy label files (execute once)
+**Note**: Create the label directories first if you skipped the output-directory steps above.
+```
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+```
+
+* Before launching, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml and make sure parameters such as model_path, label_path and input_path are set correctly. Refer to the quick start document on [yaml configuration](./yaml_configuration_guide.md) for details.
+ * run face detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_people.launch.py
+ ```
+ * run person reidentification sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_reidentification.launch.py
+ ```
+ * run face detection sample code input from Image.
+ ```
+ ros2 launch openvino_node pipeline_image.launch.py
+ ```
+ * run object segmentation sample code input from RealSenseCameraTopic.
+ ```
+ ros2 launch openvino_node pipeline_segmentation.launch.py
+ ```
+ * run vehicle detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_vehicle_detection.launch.py
+ ```
+ * run person attributes sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_person_attributes.launch.py
+ ```
+
+# More Information
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+
+###### *Any security issue should be reported using the process at https://01.org/security*
+
diff --git a/doc/quick_start/tutorial_for_yolov5_converted.md b/doc/quick_start/tutorial_for_yolov5_converted.md
new file mode 100644
index 00000000..dfc82ed8
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov5_converted.md
@@ -0,0 +1,99 @@
+# Tutorial_For_yolov5_Converted
+
+# Introduction
+This document describes a method to convert YOLOv5 nano PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv5n for deployment in practical applications.
+
+## Reference Phrase
+|Term|Description|
+|---|---|
+|OpenVINO|Open Visual Inference & Neural Network Optimization|
+|ONNX|Open Neural Network Exchange|
+|YOLO|You Only Look Once|
+|IR|Intermediate Representation|
+
+## Reference Document
+|Doc|Link|
+|---|---|
+|OpenVINO|[openvino_2_0_transition_guide](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html)|
+|YOLOv5|[yolov5](https://github.com/ultralytics/yolov5)|
+
+# Convert Weight File to ONNX
+* Clone the YOLOv5 Repository from GitHub
+```
+git clone https://github.com/ultralytics/yolov5.git
+```
+
+* Set Environment for Installing YOLOv5
+```
+cd yolov5
+python3 -m venv yolo_env         # Create a virtual python environment
+source yolo_env/bin/activate     # Activate environment
+pip install -r requirements.txt  # Install yolov5 prerequisites
+pip install onnx                 # Install ONNX
+```
+
+* Download PyTorch Weights
+```
+mkdir model_convert && cd model_convert
+wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt
+```
+
+* Convert PyTorch weights to ONNX weights
+The YOLOv5 repository provides an export.py script, which can be used to convert a PyTorch weight file to an ONNX weight file.
+```
+cd ..
+python3 export.py --weights model_convert/yolov5n.pt --include onnx
+```
+
+# Convert ONNX files to IR files
+After obtaining the ONNX weight file from the previous section [Convert Weight File to ONNX](#convert-weight-file-to-onnx), we can use the model optimizer to convert it to an IR file.
+
+* Install the OpenVINO Model Optimizer Environment
+To use the model optimizer, run the following commands to install the necessary components (if you are still in the yolo_env virtual environment, run the **deactivate** command to exit the environment, or start a new terminal).
+```
+python3 -m venv ov_env                   # Create OpenVINO virtual environment
+source ov_env/bin/activate               # Activate environment
+python -m pip install --upgrade pip      # Upgrade pip
+pip install openvino[onnx]==2022.3.0     # Install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0 # Install OpenVINO Dev Tool for ONNX
+```
+
+* Generate IR file
+```
+cd model_convert
+mo --input_model yolov5n.onnx
+```
+Then we will get three files: yolov5n.xml, yolov5n.bin, and yolov5n.mapping under the model_convert folder.
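+
+As an optional sanity check (a minimal sketch; it assumes the ov_env environment from the previous step is still active), the `benchmark_app` tool bundled with openvino-dev can confirm that the generated IR loads and runs on CPU:
+```
+# Run a short CPU benchmark against the converted IR (from the model_convert folder)
+benchmark_app -m yolov5n.xml -d CPU -t 5
+```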
+
+# Move to the Recommended Model Path
+```
+cd ~/yolov5/model_convert
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+sudo cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+```
+
+# Optimize yolov5 to yolov5-int8
+To quantize yolov5 to yolov5-int8, refer to:
+https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/111-yolov5-quantization-migration
+
+The installation guide:
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
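+Whichever variant you deploy, it can help to confirm the files landed in the path the toolkit expects before launching (paths as used in the copy step above):
+```
+ls -l /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/
+```
+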
+# FAQ
+
+**How to install the python3-venv package?**
+
+On Debian/Ubuntu systems, you need to install the python3-venv package using the following command:
+```
+apt-get update
+apt-get install python3-venv
+```
+You may need to use sudo with that command. After installing, recreate your virtual environment.
diff --git a/doc/quick_start/tutorial_for_yolov7_converted.md b/doc/quick_start/tutorial_for_yolov7_converted.md
new file mode 100644
index 00000000..9c476634
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov7_converted.md
@@ -0,0 +1,103 @@
+# Tutorial_For_yolov7_Converted
+
+# Introduction
+This document describes a method to convert YOLOv7 PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv7 for deployment in practical applications.
+
+## Reference Phrase
+|Term|Description|
+|---|---|
+|OpenVINO|Open Visual Inference & Neural Network Optimization|
+|ONNX|Open Neural Network Exchange|
+|YOLO|You Only Look Once|
+|IR|Intermediate Representation|
+
+## Reference Document
+|Doc|Link|
+|---|---|
+|OpenVINO|[openvino_2_0_transition_guide](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html)|
+|YOLOv7|[yolov7](https://github.com/WongKinYiu/yolov7)|
+
+# Convert Weight File to ONNX
+* Clone the YOLOv7 Repository from GitHub
+```
+git clone https://github.com/WongKinYiu/yolov7.git
+```
+
+* Set Environment for Installing YOLOv7
+```
+cd yolov7
+python3 -m venv yolo_env         # Create a virtual python environment
+source yolo_env/bin/activate     # Activate environment
+pip install -r requirements.txt  # Install yolov7 prerequisites
+pip install onnx                 # Install ONNX
+pip install nvidia-pyindex       # Add NVIDIA PIP index
+pip install onnx-graphsurgeon    # Install GraphSurgeon
+```
+
+* Download PyTorch Weights
+```
+mkdir model_convert && cd model_convert
+wget "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt"
+```
+
+* Convert PyTorch weights to ONNX weights
+The YOLOv7 repository provides an export.py script, which can be used to convert a PyTorch weight file to an ONNX weight file.
+```
+cd ..
+python3 export.py --weights model_convert/yolov7.pt
+```
+
+# Convert ONNX files to IR files
+After obtaining the ONNX weight file from the previous section [Convert Weight File to ONNX](#convert-weight-file-to-onnx), we can use the model optimizer to convert it to an IR file.
+
+* Install the OpenVINO Model Optimizer Environment
+To use the model optimizer, run the following commands to install the necessary components (if you are still in the yolo_env virtual environment, run the **deactivate** command to exit the environment, or start a new terminal).
+```
+python3 -m venv ov_env                   # Create OpenVINO virtual environment
+source ov_env/bin/activate               # Activate environment
+python -m pip install --upgrade pip      # Upgrade pip
+pip install openvino[onnx]==2022.3.0     # Install OpenVINO for ONNX
+pip install openvino-dev[onnx]==2022.3.0 # Install OpenVINO Dev Tool for ONNX
+```
+
+* Generate IR file
+```
+cd model_convert
+mo --input_model yolov7.onnx
+```
+Then we will get three files: yolov7.xml, yolov7.bin, and yolov7.mapping under the model_convert folder.
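+
+As an optional sanity check (a minimal sketch; it assumes the ov_env environment from the previous step is still active), you can verify that OpenVINO can read the generated IR and inspect its inputs:
+```
+# Print the model inputs of the converted IR with the OpenVINO Python API
+python3 -c "from openvino.runtime import Core; print(Core().read_model('yolov7.xml').inputs)"
+```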
+
+# Move to the Recommended Model Path
+```
+cd ~/yolov7/model_convert
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/yolov7/FP32/
+sudo cp yolov7.bin yolov7.mapping yolov7.xml /opt/openvino_toolkit/models/convert/public/yolov7/FP32/
+```
+
+# Optimize yolov7 to yolov7-int8
+To quantize yolov7 to yolov7-int8, refer to:
+https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/226-yolov7-optimization
+
+The installation guide:
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
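+Before wiring the model into a pipeline configuration, a quick listing confirms the files landed in the path the toolkit expects (paths as used in the copy step above):
+```
+ls -l /opt/openvino_toolkit/models/convert/public/yolov7/FP32/
+```
+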
+# FAQ
+
+**How to install the python3-venv package?**
+
+On Debian/Ubuntu systems, you need to install the python3-venv package using the following command:
+```
+apt-get update
+apt-get install python3-venv
+```
+You may need to use sudo with that command. After installing, recreate your virtual environment.
diff --git a/doc/quick_start/tutorial_for_yolov8_converted.md b/doc/quick_start/tutorial_for_yolov8_converted.md new file mode 100644 index 00000000..5d9793fe --- /dev/null +++ b/doc/quick_start/tutorial_for_yolov8_converted.md @@ -0,0 +1,99 @@
+# Tutorial_For_yolov8_Converted
+
+# Introduction
+Ultralytics YOLOv8 is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility.
+YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, tracking, instance segmentation, image classification, and pose estimation tasks.
+This document describes a method to export YOLOv8 nano PyTorch weight files with the .pt extension to OpenVINO IR files using the Ultralytics export tool. This method can help OpenVINO users optimize YOLOv8 for deployment in practical applications.
+
+## Documentation
+
+See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, validation, prediction, and deployment.
+
+## Install
+
+Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) in a [**Python>=3.7**](https://www.python.org/) environment with [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+
+```bash
+mkdir -p yolov8 && cd yolov8
+apt install python3.8-venv               # provides python3 -m venv
+python3 -m venv openvino_env             # create a virtual environment
+source openvino_env/bin/activate         # activate it
+pip install ultralytics                  # install ultralytics inside the environment
+```
+
+#### Train
+Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments, see the Configuration page in the YOLOv8 Docs.
+YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command:
+
+```CLI
+# Build a new model from YAML and start training from scratch
+yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640
+
+# Start training from a pretrained *.pt model
+yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
+```
+
+#### Val
+
+Validate trained YOLOv8n model accuracy on the COCO128 dataset. No arguments need to be passed, as the model retains its training data and arguments as model attributes.
+```CLI
+# val official model
+yolo detect val model=yolov8n.pt
+```
+
+#### Predict
+Use a trained YOLOv8n model to run predictions on images.
+```CLI
+# predict with official model
+yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
+```
+
+#### Export
+Export a YOLOv8n model to a different format such as OpenVINO, ONNX, or CoreML. Here we export to the OpenVINO format:
+```CLI
+# export official model
+yolo export model=yolov8n.pt format=openvino
+```
+The export step creates a `yolov8n_openvino_model` directory containing the IR files, which is used in the next section.
+
+# Move to the Recommended Model Path
+```
+cd yolov8n_openvino_model
+sudo mkdir -p /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+sudo cp yolov8* /opt/openvino_toolkit/models/convert/public/FP32/yolov8n
+```
+
+# Optimize yolov8n to yolov8n-int8
+To optimize yolov8n to yolov8n-int8, refer to:
+
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/notebooks/230-yolov8-optimization/230-yolov8-optimization.ipynb
+
+The installation guide is available at:
+https://github.com/openvinotoolkit/openvino_notebooks/blob/main/README.md#-installation-guide
+
+# FAQ
+
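+How can I quickly verify that the exported OpenVINO model loads?
+
+A minimal check, offered as a sketch: it assumes `openvino-dev==2022.3.0` is installed in the active environment (the steps above do not install it), which provides the `benchmark_app` tool, and that you run it from the export output directory.
+```
+pip install openvino-dev==2022.3.0        # assumption: provides the benchmark_app tool
+cd yolov8n_openvino_model
+benchmark_app -m yolov8n.xml -d CPU -t 5  # loads and compiles the IR, then runs a short CPU benchmark
+```
+If benchmark_app reports throughput numbers instead of an error, the IR files are valid.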

+

+Reference links:
+
+* https://github.com/ultralytics/ultralytics
+* https://docs.ultralytics.com/tasks/detect/#predict
+
+

diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md new file mode 100644 index 00000000..b6a08a2a --- /dev/null +++ b/doc/quick_start/yaml_configuration_guide.md @@ -0,0 +1,130 @@
+# Introduction
+
+The contents of the .yaml config file should be well structured and follow the supported rules and entity names.
+
+# Sample
+## [pipeline_people.yaml](../../sample/param/pipeline_people.yaml)
+```yaml
+Pipelines:
+- name: people
+  inputs: [StandardCamera]
+  infers:
+    - name: FaceDetection
+      model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml
+      engine: CPU
+      label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels
+      batch: 1
+      confidence_threshold: 0.5
+      enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
+    - name: AgeGenderRecognition
+      model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml
+      engine: CPU
+      label: to/be/set/xxx.labels
+      batch: 16
+    - name: EmotionRecognition
+      model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
+      engine: CPU
+      label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels
+      batch: 16
+    - name: HeadPoseEstimation
+      model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
+      engine: CPU
+      label: to/be/set/xxx.labels
+      batch: 16
+  outputs: [ImageWindow, RosTopic, RViz]
+  connects:
+    - left: StandardCamera
+      right: [FaceDetection]
+    - left: FaceDetection
+      right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz]
+    - left: AgeGenderRecognition
+      right: [ImageWindow, RosTopic, RViz]
+    - left: EmotionRecognition
+      right: [ImageWindow, RosTopic, RViz]
+    - left: HeadPoseEstimation
+      right: [ImageWindow, RosTopic, RViz]
+
+Common:
+```
+## Interface Description
+
+### Specify pipeline name
+The name value of this pipeline can be any value other than null.
+
+### Specify inputs
+**Note:** The input parameter can only have one value.
+Currently, the options for inputs are:
+
+|Input Option|Description|Configuration|
+|--------------------|------------------------------------------------------------------|-----------------------------------------|
+|StandardCamera|Any RGB camera with a USB port. Currently only the first USB camera is used if several are connected.|```inputs: [StandardCamera]```|
+|RealSenseCamera| Intel RealSense RGB-D camera, accessed directly via the librealsense plugin of OpenCV.|```inputs: [RealSenseCamera]```|
+|RealSenseCameraTopic| Any ROS topic whose messages are structured as image messages.|```inputs: [RealSenseCameraTopic]```|
+|Image| Any image file which can be parsed by OpenCV, such as .png, .jpeg.|```inputs: [Image]```|
+|Video| Any video file which can be parsed by OpenCV.|```inputs: [Video]```|
+|IpCamera| Any RTSP server which can push a video stream.|```inputs: [IpCamera]```|
+
+**Note:** Please refer to the open-source repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install an RTSP server for IpCamera input.
+
+### Specify input_path
+The input_path needs to be specified when the input is Image, Video, or IpCamera.
+
+|Input Option|Configuration|
+|--------------------|------------------------------------------------------------------|
+|Image|```input_path: to/be/set/image_path```|
+|Video|```input_path: to/be/set/video_path```|
+|IpCamera|```input_path: "rtsp://localhost/test"```|
+
+### Specify infers
+The Inference Engine is a set of C++ classes that provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices.
+
+* #### name
+The name of the inference engine needs to be specified here. Currently, the following inference features are supported:
+
+|Inference|Description|
+|-----------------------|------------------------------------------------------------------|
+|FaceDetection|Object detection task applied to face recognition using a sequence of neural networks.|
+|EmotionRecognition| Emotion recognition based on the detected face image.|
+|AgeGenderRecognition| Age and gender recognition based on the detected face image.|
+|HeadPoseEstimation| Head pose estimation based on the detected face image.|
+|ObjectDetection| Object detection based on SSD-based trained models.|
+|VehicleDetection| Vehicle and passenger detection based on Intel models.|
+|ObjectSegmentation| Object detection and segmentation.|
+|ObjectSegmentationMaskrcnn| Object segmentation based on the Mask R-CNN model.|
+
+* #### model
+The path of the model needs to be specified here. The scheme below illustrates the typical workflow for deploying a trained deep learning model.
+![trained deep learning model](../../data/images/CVSDK_Flow.png "trained deep learning model")
+
+* #### engine
+**Note:** Currently, only CPU and GPU are supported.
+The full list of target device options is:
+
+|Target Device|
+|-----------------------|
+|CPU|
+|Intel® Integrated Graphics|
+|FPGA|
+|Intel® Movidius™ Neural Compute Stick|
+
+* #### label
+Currently, this parameter is not used.
+
+* #### batch
+Enables dynamic batch size for the inference engine network.
+
+### Specify outputs
+**Note:** The output parameter can have one or more values.
+Currently, the output options are: + +|Option|Description|Configuration| +|--------------------|-----------------------------------------------------|---------------------------------------------| +|ImageWindow| Window showing results|```outputs: [ImageWindow, RosTopic, RViz]```| +|RosTopic| Output the topic|```outputs: [ImageWindow, RosTopic, RViz]```| +|RViz| Display the result in rviz|```outputs: [ImageWindow, RosTopic, RViz]```| + +### Specify confidence_threshold +Set the threshold of detection probability. + +### Specify connects +The topology of a pipe can only have one value on the left and multiple values on the right. The value of the first left node should be the same as the specified **inputs**. diff --git a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md b/doc/tables_of_contents/Design_Architecture_and_logic_flow.md deleted file mode 100644 index 86c48bb3..00000000 --- a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md +++ /dev/null @@ -1,27 +0,0 @@ -# Design Architecture -From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. - -![OpenVINO_Architecture](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/design_arch.PNG "OpenVINO RunTime Architecture") - -- **Intel® OpenVINO™ toolkit** is leveraged to provide deep learning basic implementation for data inference. is free software that helps developers and data scientists speed up computer vision workloads, streamline deep learning inference and deployments, -and enable easy, heterogeneous execution across Intel® platforms from edge to cloud. It helps to: - - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. - - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - - Speed development using optimized OpenCV* and OpenVX* functions. -- **ROS2 OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. -- **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework. -- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results. -- **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. - -# Logic Flow -From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. 
- -![Logic_Flow](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") - -Once a corresponding program is launched with a specified .yaml config file passed in the .launch.py file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. - -The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [the configuration guidance](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) for how to create or edit the config files. - -**Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc. - -**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. diff --git a/doc/tables_of_contents/prerequisite.md b/doc/tables_of_contents/prerequisite.md deleted file mode 100644 index f42279d7..00000000 --- a/doc/tables_of_contents/prerequisite.md +++ /dev/null @@ -1,31 +0,0 @@ -# Development and Target Platform - ->> The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use. - -## Hardware -### Processor Supported: -- Intel architecture processor, e.g. 6th~8th generation Intel® Core™ -- Intel® Xeon® v5 family -- Intel® Xeon® v6 family -- Intel® Pentium® processor N4200/5, N3350/5, N3450/5 with Intel® HD Graphics - -**Notes**: -- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor. -- A chipset that supports processor graphics is required for Intel® Xeon® processors. -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -### Pripheral Depended: -- Intel® Movidius™ Neural Compute Stick -- Intel® Neural Compute Stick 2 -- Intel® Vision Accelerator Design with Intel® Movidius™ VPU -- RGB Camera, e.g. RealSense D400 Series or standard USB camera - -## Operating Systems -- Ubuntu 16.04 or 18.04 long-term support (LTS), 64-bit: Minimum supported kernel is 4.14 -- CentOS 7.4, 64-bit (for target only) -- Yocto Project Poky Jethro v2.0.3, 64-bit (for target only and requires modifications) - -**Note**: Since **Ubuntu 18.04** in the list is the only one well supported by ROS2 core, it is highly recommended to use as the OS. 
diff --git a/doc/tables_of_contents/supported_features/Supported_features.md b/doc/tables_of_contents/supported_features/Supported_features.md deleted file mode 100644 index 3117ac71..00000000 --- a/doc/tables_of_contents/supported_features/Supported_features.md +++ /dev/null @@ -1,33 +0,0 @@ -# Supported Features -## Input Resources -Currently, the package supports RGB frame data from several kinds of input resources: -- Standard USB Camera -- Realsense Camera -- Image Topic -- Image File -- Video File - -See more from [the input resource description](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/input_resource.md). - -## Inference Implementations -Inferences shown in below list are supported: -- Face Detection -- Emotion Recognition -- Age and Gender Recognition -- Head Pose Estimation -- Object Detection -- Vehicle and License Detection -- Object Segmentation -- Person Re-Identification -- Face Re-Identification - -[Inference functionality overview](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/inference_functionality_overview.md). - -## Output Types -The inference results can be output in several types. One or more types can be enabled for any infernece pipeline: -- Topic Publishing -- Image View Window -- RViz Showing -- Service (as a mechanism responding user's request about object detection results.) - -See more from [output types](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/output_types.md) page. diff --git a/doc/tables_of_contents/supported_features/inference_functionality_overview.md b/doc/tables_of_contents/supported_features/inference_functionality_overview.md deleted file mode 100644 index 35afb571..00000000 --- a/doc/tables_of_contents/supported_features/inference_functionality_overview.md +++ /dev/null @@ -1,16 +0,0 @@ -# Infernece Feature List -Currently, the inference feature list is supported: - -|Inference Label|Description|Outputs Topic| -|---|---|---| -|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|EmotionRecognition| Emotion recognition based on detected face image.|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))| -|AgeGenderRecognition| Age and gener recognition based on detected face image.|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|HeadPoseEstimation| Head pose estimation based on detected face image.|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|ObjectDetection| object detection based on SSD-based trained models.|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|VehicleAttribsDetection| Vehicle detection based on Intel 
models.|```/ros2_openvino_toolkit/detected_vehicles_attribs```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|LicensePlateDetection| License detection based on Intel models.|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| -|ObjectSegmentation| object detection and segmentation.|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|PersonReidentification| Person Reidentification based on object detection.|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|LandmarksDetection| Landmark regression based on face detection.|```/ros2_openvino_toolkit/detected_landmarks```([people_msgs::msg::LandmarkStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LandmarkStamped.msg))| -|FaceReidentification| Face Reidentification based on face detection.|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| diff --git a/doc/tables_of_contents/supported_features/input_resource.md b/doc/tables_of_contents/supported_features/input_resource.md deleted file mode 100644 index 43cd3af0..00000000 --- a/doc/tables_of_contents/supported_features/input_resource.md +++ /dev/null @@ -1,8 +0,0 @@ -# Full list of supported Input Resources -|Input Resource Name|Description| -|---|-------------------------------------------| -|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| -|RealSenseCamera| Intel RealSense RGB-D Camera,directly calling RealSense Camera via librealsense plugin of openCV.| -|RealSenseCameraTopic| any ROS topic which is structured in image message.The topic to be inputted must be remapped to name ```/openvino_toolkit/image_raw```(type [sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg))| -|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| -|Video| Any video file which can be parsed by openCV.| \ No newline at end of file diff --git a/doc/tables_of_contents/supported_features/output_types.md b/doc/tables_of_contents/supported_features/output_types.md deleted file mode 100644 index 315c0cb9..00000000 --- a/doc/tables_of_contents/supported_features/output_types.md +++ /dev/null @@ -1,43 +0,0 @@ -# Output Types ->> The inference results can be output in several types. One or more types can be enabled for any inference pipeline. -## Topic Publishing ->> Specific topic(s) can be generated and published according to the given inference functionalities.
- -|Inference|Published Topic| -|---|---| -|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))|/ros2_openvino_toolkit/face_detection(object_msgs:msg:ObjectsInBoxes) -|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| - -## Image View Window ->> The original image and the inference results are rendered together and shown in a CV window. -## RViz Showing ->> The Rendered image (rendering inference results into the original image) was transformed into sensor_msgs::msg::Image topic, that can be shown in RViz application. -- RViz Published Topic -```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg)) - -## Service ->> Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
- -- **Face Detection or Object Detection for a given Image file** - -|Inference|Service| -|---|---| -|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Age Gender Detection Service|```/detect_age_gender```([people_msgs::srv::AgeGender](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/AgeGender.srv))| -|Headpose Detection Service|```/detect_head_pose```([people_msgs::srv::HeadPose](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/HeadPose.srv))| -|Emotion Detection Service|```/detect_emotion```([people_msgs::srv::Emotion](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/Emotion.srv))| - -- **Inference Pipeline Lifecycle Management** - - Create new pipeline - - Start/Stop/Pause a pipeline - - Get pipeline list or status - diff --git a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md b/doc/tables_of_contents/tutorials/Multiple_Pipelines.md deleted file mode 100644 index cd03aec7..00000000 --- a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md +++ /dev/null @@ -1,54 +0,0 @@ -# Multiple Pipelines ->> This is a way to run more than one pipeline in the same process.Having multiple pipelines in a single instance allows each pipeline to have custom configuration and different performance. - -## prerequest -see [this guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) to see how to customize a pipeline. 
- -## A demo for multiple pipeline -```bash -1 Pipelines: - 2 - name: object1 - 3 inputs: [StandardCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 7 engine: CPU - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: StandardCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - 22 - 23 - name: object2 - 24 inputs: [RealSenseCamera] - 25 infers: - 26 - name: ObjectDetection - 27 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 28 engine: CPU - 29 label: to/be/set/xxx.labels - 30 batch: 1 - 31 confidence_threshold: 0.5 - 32 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 33 outputs: [ImageWindow, RosTopic, RViz] - 34 connects: - 35 - left: RealSenseCamera - 36 right: [ObjectDetection] - 37 - left: ObjectDetection - 38 right: [ImageWindow] - 39 - left: ObjectDetection - 40 right: [RosTopic] - 41 - left: ObjectDetection - 42 right: [RViz] - 43 - 44 OpenvinoCommon: - -``` diff --git a/doc/tables_of_contents/tutorials/configuration_file_customization.md b/doc/tables_of_contents/tutorials/configuration_file_customization.md deleted file mode 100644 index 703459b6..00000000 --- a/doc/tables_of_contents/tutorials/configuration_file_customization.md +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration File Customization - -One of the key added values of ROS2 OpenVINO is automatically create new pipeline on demand according to the given configuration files. In order to create new pipelines, the end user only need to create a new configuration file or update one already existed. The configuration file must be written by following some rules. - - 1 Pipelines: - 2 - name: object - 3 inputs: [RealSenseCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/intel/openvino/deployment_tools/tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml - 7 engine: MYRIAD - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: RealSenseCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - -In this sample, a pipeline is to be created with this topology: - -```flow -input=operation: RealSenseCamera -infer=operation: ObjectDetection -output1=operation: ImageWindow -output2=operation: RosTopic -output3=operation: RViz - -input-infer-output1 -infer-output2 -infer-output3 -``` - -Detail Description for each line shows in below tabel: - -|Line No.|Description| -|-------------|---| -| 1 |Keyword, label for pipeline parameters. 
The pipeline configuration must be started by this line.| -|2|Pipeline name, the published topics bound to this name. (e.g. /openvino_toolkit/**object**/face_detection)| -|3|The name of chosen input device, should be one and only one of [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#input-resources) (taking the item "Input Resource Name").| -|4|key word for inference section. one or more inferences can be included in a pipeline's inference section.| -|5|The name of Inference instance, should be in [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations).
**NOTE**: if a pipeline contains 2 or more inference instances, the first one should be a detection inference. -|6|Model description file with absolute path, generated by model_optimizer tool| -|7|The name of Inference engine, should be one of:CPU, GPU and MYRIAD.| -|8|The file name with absolute path of object labels.
**NOTE**: not enabled in the current version. The labels file with the same name as model description file under the same folder is searched and used.| -|9|The number of input data to be enqueued and handled by inference engine in parallel.| -|10|Set the inference result filtering by confidence ratio.| -|11|set *enable_roi_constraint* to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame.| -|12|A list of output method enabled for inference result showing/notifying. Should be one or some of:
• ImageWindow
• RosTopic
• Rviz
• RosService(*)
**NOTE**: RosService can only be used in ROS2 service server pipeline.| -|13|keyword for pipeline entities' relationship topology.| -|14~21|The detailed connection topology for the pipeline.
A pair of "left" and "right" parameters, whose contents are the names of inputs(line3), infers(line5) and outputs(line12) defines a connection between the two entities, it also defines that the data would be moved from *entity left* to *entity right*.| diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..2ee938b7 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,52 @@
+ARG ROS_PRE_INSTALLED_PKG
+FROM osrf/ros:${ROS_PRE_INSTALLED_PKG}
+ARG VERSION
+
+SHELL ["/bin/bash", "-c"]
+
+# Avoid interactive prompts and suppress apt-key warnings during the build
+ARG DEBIAN_FRONTEND=noninteractive
+ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1
+
+# install openvino 2022.3
+# https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html
+RUN apt update && apt install --assume-yes curl wget gnupg2 lsb-release \
+&& wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && \
+apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && echo "deb https://apt.repos.intel.com/openvino/2022 focal main" | tee /etc/apt/sources.list.d/intel-openvino-2022.list && \
+apt update && apt-cache search openvino && apt install -y openvino-2022.3.0
+
+# install librealsense2
+RUN apt-get install -y --no-install-recommends \
+software-properties-common \
+&& apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE \
+&& add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u \
+&& apt-get install -y --no-install-recommends \
+librealsense2-dkms \
+librealsense2-utils \
+librealsense2-dev \
+librealsense2-dbg \
+libgflags-dev \
+libboost-all-dev \
+&& rm -rf /var/lib/apt/lists/*
+
+# other dependencies
+RUN apt-get update && apt-get install -y python3-pip && python3 -m pip install -U \
+numpy \
+networkx \
+pyyaml \
+requests \
+&& apt-get install --assume-yes apt-utils \
+&& apt-get install -y --no-install-recommends libboost-all-dev \
+&& apt-get install -y ros-${VERSION}-diagnostic-updater \
+&& pip install --upgrade pip
+RUN cd /usr/lib/x86_64-linux-gnu && ln -sf libboost_python-py36.so libboost_python37.so
+COPY jpg /root/jpg
+# build ros2 openvino toolkit
+RUN cd /root && mkdir -p catkin_ws/src && cd /root/catkin_ws/src \
+&& git clone https://github.com/intel/ros2_object_msgs.git
+WORKDIR /root/catkin_ws/src
+RUN git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git
+WORKDIR /root/catkin_ws
+RUN source /opt/ros/${VERSION}/setup.bash && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release
 diff --git a/docker/docker_instructions_ov2.0.md b/docker/docker_instructions_ov2.0.md new file mode 100644 index 00000000..c9cdd202 --- /dev/null +++ b/docker/docker_instructions_ov2.0.md @@ -0,0 +1,130 @@
+# Run Docker Images For ROS2_OpenVINO_Toolkit
+
+**NOTE:**
+The steps below have been tested on **Ubuntu 20.04**.
+Supported ROS2 versions include foxy and galactic.
+
+## 1. Environment Setup
+* Install docker.
+Refer to: [Docker_install_guide](https://docs.docker.com/engine/install/ubuntu/)
+
+## 2. Build the docker image from the Dockerfile
+```
+cd ~/ros2_openvino_toolkit/docker
+vi Dockerfile   # optionally review or edit the Dockerfile first
+docker build --build-arg ROS_PRE_INSTALLED_PKG= --build-arg VERSION= --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_openvino_202203 .
+```
+For example:
+* Build the image for ros_galactic
+```
+cd ~/ros2_openvino_toolkit/docker
+docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_galactic_openvino_202203 .
+```
+* Build the image for ros_foxy
+```
+cd ~/ros2_openvino_toolkit/docker
+docker build --build-arg ROS_PRE_INSTALLED_PKG=foxy-desktop --build-arg VERSION=foxy --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_foxy_openvino_202203 .
+```
+
+## 3. Download and load the docker image
+* Download the docker image
+```
+# ros2_openvino_202203 for demo
+cd ~/Downloads/
+wget 
+```
+* Load the docker image
+```
+cd ~/Downloads/
+docker load -i 
+docker images
+# (the loaded image shows in the list)
+```
+
+## 4. Running the Demos
+* Install dependencies
+```
+sudo apt install x11-xserver-utils
+xhost +
+```
+* Run the docker image
+```
+docker images
+docker run -itd -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix -v /dev:/dev --privileged=true --name 
+```
+* In the Docker Container
+
+* Preparation
+```
+source /opt/ros//setup.bash
+cd ~/catkin_ws
+source ./install/local_setup.bash
+```
+
+* See all available models
+OMZ tools are provided for downloading and converting OMZ models in ov2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/)
+
+```
+omz_downloader --print_all
+```
+
+* Download the optimized Intermediate Representation (IR) of the model (execute once), for example:
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/
+```
+
+* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) needs to be converted to an intermediate representation (such as the model for object detection):
+```
+cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list
+omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert
+```
+* Copy label files (execute once)
+**Note**: You need to create the label directories manually if you skipped the output-directory steps above.
+```
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/
+sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32
+```
+
+* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before launching, and make sure parameters such as model_path, label_path, and input_path are set correctly. Please refer to the [yaml configuration guide](../doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance.
+  * Run the face detection sample with input from StandardCamera.
+  ```
+  ros2 launch openvino_node pipeline_people.launch.py
+  ```
+  * Run the person reidentification sample with input from StandardCamera.
+  ```
+  ros2 launch openvino_node pipeline_reidentification.launch.py
+  ```
+  * Run the face detection sample with input from Image.
+  ```
+  ros2 launch openvino_node pipeline_image.launch.py
+  ```
+  * Run the object segmentation sample with input from RealSenseCameraTopic.
+  ```
+  ros2 launch openvino_node pipeline_segmentation.launch.py
+  ```
+  * Run the object segmentation sample with input from Image.
+  ```
+  ros2 launch openvino_node pipeline_segmentation_image.launch.py
+  ```
+  * Run the vehicle detection sample with input from StandardCamera.
+  ```
+  ros2 launch openvino_node pipeline_vehicle_detection.launch.py
+  ```
+  * Run the person attributes sample with input from StandardCamera.
+ ``` + ros2 launch openvino_node pipeline_person_attributes.launch.py + ``` + +# More Information +* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw + +###### *Any security issue should be reported using process at https://01.org/security* + diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp deleted file mode 100644 index ed5923f3..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2018-2019 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief A header file with declaration for NetworkEngine class - * @file engine.h - */ -#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ -#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ - -#pragma once - -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "inference_engine.hpp" - -namespace Engines -{ -/** - * @class EngineManager - * @brief This class is used to create and manage Inference engines. - */ -class EngineManager -{ -public: - /** - * @brief Create InferenceEngine instance by given Engine Name and Network. - * @return The shared pointer of created Engine instance. - */ - std::shared_ptr createEngine( - const std::string &, const std::shared_ptr &); - -private: -#if(defined(USE_OLD_E_PLUGIN_API)) - std::map plugins_for_devices_; - std::unique_ptr - makePluginByName( - const std::string & device_name, const std::string & custom_cpu_library_message, - const std::string & custom_cldnn_message, bool performance_message); - std::shared_ptr createEngine_beforeV2019R2( - const std::string &, const std::shared_ptr &); -#endif - - std::shared_ptr createEngine_V2019R2_plus( - const std::string &, const std::shared_ptr &); - -}; -} // namespace Engines - -#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp deleted file mode 100644 index ec46271e..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * @brief A header file with declaration for BaseFilter Class - * @file base_filter.hpp - */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ - -#include -#include -#include -#include -#include "dynamic_vino_lib/inferences/base_inference.hpp" - -namespace dynamic_vino_lib -{ - -/** - * @class BaseFilter - * @brief Base class for result filter. - */ -class BaseFilter -{ -public: - BaseFilter(); - /** - * @brief Initiate a result filter. - */ - virtual void init() = 0; - - /** - * @brief Get the filtered results' ROIs. - * @return The filtered ROIs. - */ - virtual std::vector getFilteredLocations() = 0; - - /** - * @brief Check if the filter conditions is valid for filtering. - * @param[in] Filter conditions. - * @return true if some of the conditions are valid, otherwise false. - */ - bool isValidFilterConditions(const std::string &); - - /** - * @brief Accept the filter conditions for filtering. - * @param[in] Filter conditions. - */ - void acceptFilterConditions(const std::string &); - - /** - * @brief Decide whether the input string is a relational operator or not. - * @param[in] A string to be decided. - * @return True if the input string is a relational operator, false if not. - */ - bool isRelationOperator(const std::string &); - - /** - * @brief Decide whether the input string is a logic operator or not. - * @param[in] A string to be decided. - * @return True if the input string is a logic operator, false if not. - */ - bool isLogicOperator(const std::string &); - - /** - * @brief Decide whether the an operator has a higher priority than anthor. - * @param[in] The two operators. - * @return True if the first operator has higher priority, false if not. - */ - bool isPriorTo(const std::string &, const std::string &); - - /** - * @brief Convert the input bool variable to a string type. - * @param[in] A bool type to be converted. - * @return A converted string result. - */ - std::string boolToStr(bool); - - /** - * @brief Convert the input string variable to a bool type. - * @param[in] A string type to be converted. - * @return A converted bool result. - */ - bool strToBool(const std::string &); - - /** - * @brief Get the filter conditions in the suffix order. - * @return A vector with suffix-order filter conditions. - */ - const std::vector & getSuffixConditions() const; - - /** - * @brief Do logic operation with the given bool values and the operator. - * @param[in] A bool string, an logic operator, the other bool string. - * @return The logic operation result. - */ - bool logicOperation(const std::string &, const std::string &, const std::string &); - - /** - * @brief Compare two strings with a given relational operator. - * @param[in] A string, an relational operator, the other string. - * @return True if valid, false if not. - */ - static bool stringCompare(const std::string &, const std::string &, const std::string &); - - /** - * @brief Compare two floats with a given relational operator. - * @param[in] A float number, an relational operator, the other float number. - * @return True if valid, false if not. - */ - static bool floatCompare(float, const std::string &, float); - - /** - * @brief Convert a string into a float number. - * @param[in] A string to be converted. - * @return The converted float number, 0 if string is invalid. - */ - static float stringToFloat(const std::string &); - - /** - * @brief A macro to decide whether a given result satisfies the filter condition. 
- * @param[in] A key to function mapping, a given result. - * @return True if valid, false if not. - */ - #define ISVALIDRESULT(key_to_function, result) \ - { \ - std::vector suffix_conditons = getSuffixConditions(); \ - std::stack result_stack; \ - for (auto elem : suffix_conditons) { \ - if (!isRelationOperator(elem) && !isLogicOperator(elem)) { \ - result_stack.push(elem); \ - } else { \ - try { \ - std::string str1 = result_stack.top(); \ - result_stack.pop(); \ - std::string str2 = result_stack.top(); \ - result_stack.pop(); \ - if (key_to_function.count(str2)) { \ - result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \ - } else { \ - result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \ - } \ - } \ - catch (...) { \ - slog::err << "Invalid filter conditions format!" << slog::endl; \ - } \ - } \ - } \ - if (result_stack.empty()) { \ - return true; \ - } \ - return strToBool(result_stack.top()); \ - } - -private: - /** - * @brief Parse the filter conditions and stores it into a vector. - * @param[in] A string form filter conditions. - * @return The vector form filter conditions. - */ - std::vector split(const std::string & filter_conditions); - - /** - * @brief Convert the infix expression into suffix expression. - * @param[in] The infix form filter conditions. - */ - void infixToSuffix(std::vector&infix_conditions); - - /** - * @brief Strip the extra space in a string. - * @param[in] A string to be striped. - * @return The striped string. - */ - std::string strip(const std::string & str); - - std::string striped_conditions_ = ""; - std::vector suffix_conditons_; - std::vector relation_operators_ = {"==", "!=", "<=", ">=", "<", ">"}; - std::vector logic_operators_ = {"&&", "||"}; -}; -} // namespace dynamic_vino_lib - -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp deleted file mode 100644 index 0966a96a..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of Inference Manager class - * @file inference_manager.hpp - */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "dynamic_vino_lib/pipeline.hpp" - -/** - * @class InferenceManager - * @brief This class manages inference resources. - */ -class InferenceManager -{ -public: - /** - * @brief Get the singleton instance of InferenceManager class. - * The instance will be created when first call. - * @return The reference of InferenceManager instance. 
- */ - static InferenceManager & getInstance() - { - static InferenceManager manager_; - return manager_; - } - - std::shared_ptr createPipeline( - const Params::ParamManager::PipelineRawData & params); - void removePipeline(const std::string & name); - InferenceManager & updatePipeline( - const std::string & name, - const Params::ParamManager::PipelineRawData & params); - - void runAll(); - void stopAll(); - void joinAll(); - - enum PipelineState - { - PipelineState_ThreadNotCreated, - PipelineState_ThreadStopped, - PipelineState_ThreadRunning, - PipelineState_Error - }; - struct PipelineData - { - Params::ParamManager::PipelineRawData params; - std::shared_ptr pipeline; - std::vector> spin_nodes; - std::shared_ptr thread; - PipelineState state; - }; - -private: - InferenceManager() {} - InferenceManager(InferenceManager const &); - void operator=(InferenceManager const &); - void threadPipeline(const char * name); - std::map> - parseInputDevice(const Params::ParamManager::PipelineRawData & params); - std::map> parseOutput( - const Params::ParamManager::PipelineRawData & params); - std::map> - parseInference(const Params::ParamManager::PipelineRawData & params); - std::shared_ptr createFaceDetection( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createAgeGenderRecognition( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createEmotionRecognition( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createHeadPoseEstimation( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createObjectDetection( - const Params::ParamManager::InferenceParams & infer); - - std::map pipelines_; - }; - -#endif // DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp deleted file mode 100644 index 7d8b6e33..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief A header file with declaration for LicensePlateDetection Class - * @file license_plate_detection.hpp - */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ -#include -#include -#include -#include -#include "dynamic_vino_lib/models/license_plate_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" -#include "opencv2/opencv.hpp" -// namespace -namespace dynamic_vino_lib -{ -/** - * @class LicensePlateDetectionResult - * @brief Class for storing and processing license plate detection result. 
- */ -class LicensePlateDetectionResult : public Result -{ -public: - friend class LicensePlateDetection; - explicit LicensePlateDetectionResult(const cv::Rect & location); - std::string getLicense() const - { - return license_; - } - -private: - std::string license_ = ""; -}; -/** - * @class LicensePlateDetection - * @brief Class to load license plate detection model and perform detection. - */ -class LicensePlateDetection : public BaseInference -{ -public: - using Result = dynamic_vino_lib::LicensePlateDetectionResult; - LicensePlateDetection(); - ~LicensePlateDetection() override; - /** - * @brief Load the license plate detection model. - */ - void loadNetwork(std::shared_ptr); - /** - * @brief Enqueue a frame to this class. - * The frame will be buffered but not infered yet. - * @param[in] frame The frame to be enqueued. - * @param[in] input_frame_loc The location of the enqueued frame with respect - * to the frame generated by the input device. - * @return Whether this operation is successful. - */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; - /** - * @brief Set the sequence input blob - */ - void fillSeqBlob(); - /** - * @brief Start inference for all buffered frames. - * @return Whether this operation is successful. - */ - bool submitRequest() override; - /** - * @brief This function will fetch the results of the previous inference and - * stores the results in a result buffer array. All buffered frames will be - * cleared. - * @return Whether the Inference object fetches a result this time - */ - bool fetchResults() override; - /** - * @brief Get the length of the buffer result array. - * @return The length of the buffer result array. - */ - int getResultsLength() const override; - /** - * @brief Get the location of result with respect - * to the frame generated by the input device. - * @param[in] idx The index of the result. - */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; - /** - * @brief Show the observed detection result either through image window - or ROS topic. - */ - void observeOutput(const std::shared_ptr & output); - /** - * @brief Get the name of the Inference instance. - * @return The name of the Inference instance. - */ - const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; - -private: - std::shared_ptr valid_model_; - std::vector results_; - const std::vector licenses_ = { - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", - "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", - "U", "V", "W", "X", "Y", "Z" - }; -}; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp deleted file mode 100644 index 061a1c2b..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) 2018-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @brief A header file with declaration for ModelAttribute class.
- * @file base_attribute.hpp
- */
-
-#ifndef DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_
-
-#include <fstream>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "inference_engine.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-
-namespace Models
-{
-/**
- * @class ModelAttribute
- * @brief This class represents the network given by .xml and .bin file
- */
-class ModelAttribute
-{
-public:
-  using Ptr = std::shared_ptr<ModelAttribute>;
-  struct ModelAttr {
-    int max_proposal_count = 0;
-    int object_size = 0;
-    int input_height = 0;
-    int input_width = 0;
-    std::string model_name;
-    std::map<std::string, std::string> input_names;
-    std::map<std::string, std::string> output_names;
-    std::vector<std::string> labels;
-  };
-
-  ModelAttribute(const std::string model_name)
-  {
-    attr_.model_name = model_name;
-  }
-
-  inline bool isVerified()
-  {
-    return (attr_.max_proposal_count > 0 && attr_.object_size > 0 && attr_.input_height > 0
-      && attr_.input_width > 0 && !attr_.input_names.empty() && !attr_.output_names.empty());
-  }
-  inline void printAttribute()
-  {
-    slog::info << "----Attributes for Model " << attr_.model_name << "----" << slog::endl;
-    slog::info << "| model_name: " << attr_.model_name << slog::endl;
-    slog::info << "| max_proposal_count: " << attr_.max_proposal_count << slog::endl;
-    slog::info << "| object_size: " << attr_.object_size << slog::endl;
-    slog::info << "| input_height: " << attr_.input_height << slog::endl;
-    slog::info << "| input_width: " << attr_.input_width << slog::endl;
-    slog::info << "| input_names: " << slog::endl;
-    for (auto & item: attr_.input_names) {
-      slog::info << "| " << item.first << "-->" << item.second << slog::endl;
-    }
-    slog::info << "| output_names: " << slog::endl;
-    for (auto & item: attr_.output_names) {
-      slog::info << "| " << item.first << "-->" << item.second << slog::endl;
-    }
-
-    if(attr_.max_proposal_count <= 0 || attr_.object_size <= 0 || attr_.input_height <= 0
-      || attr_.input_width <= 0 || attr_.input_names.empty() || attr_.output_names.empty()){
-      slog::info << "--------" << slog::endl;
-      slog::warn << "Not all attributes are set correctly! Zero or empty values are not"
-        << " allowed in the above list." << slog::endl;
-    }
-    slog::info << "--------------------------------" << slog::endl;
-  }
-
-  virtual bool updateLayerProperty(
-    const InferenceEngine::CNNNetwork&)
-  { return false; }
-
-  inline std::string getModelName() const
-  {
-    return attr_.model_name;
-  }
-
-  inline void setModelName(std::string name)
-  {
-    attr_.model_name = name;
-  }
-
-  inline std::string getInputName(std::string name = "input") const
-  {
-    // std::map<std::string, std::string>::iterator it;
-    auto it = attr_.input_names.find(name);
-    if(it == attr_.input_names.end()){
-      slog::warn << "No input named: " << name << slog::endl;
-      return std::string("");
-    }
-
-    return it->second;
-  }
-
-  inline std::string getOutputName(std::string name = "output") const
-  {
-    // std::map<std::string, std::string>::iterator it;
-    auto it = attr_.output_names.find(name);
-    if(it == attr_.output_names.end()){
-      slog::warn << "No output named: " << name << slog::endl;
-      return std::string("");
-    }
-
-    return it->second;
-  }
-
-  inline int getMaxProposalCount() const
-  {
-    return attr_.max_proposal_count;
-  }
-
-  inline int getObjectSize() const
-  {
-    return attr_.object_size;
-  }
-
-  inline void loadLabelsFromFile(const std::string file_path)
-  {
-    std::ifstream input_file(file_path);
-    std::copy(std::istream_iterator<std::string>(input_file),
-      std::istream_iterator<std::string>(),
-      std::back_inserter(attr_.labels));
-  }
-
-  inline std::vector<std::string> & getLabels()
-  {
-    return attr_.labels;
-  }
-
-  inline void addInputInfo(std::string key, std::string value)
-  {
-    attr_.input_names[key] = value;
-  }
-
-  inline void addOutputInfo(std::string key, std::string value)
-  {
-    attr_.output_names[key] = value;
-  }
-
-  inline void setInputHeight(const int height)
-  {
-    attr_.input_height = height;
-  }
-
-  inline void setInputWidth(const int width)
-  {
-    attr_.input_width = width;
-  }
-
-  inline void setMaxProposalCount(const int max)
-  {
-    attr_.max_proposal_count = max;
-  }
-
-  inline void setObjectSize(const int size)
-  {
-    attr_.object_size = size;
-  }
-
-protected:
-  ModelAttr attr_;
-
-};
-
-class SSDModelAttr : public ModelAttribute
-{
-public:
-  explicit SSDModelAttr(const std::string model_name = "SSDNet-like");
-
-  bool updateLayerProperty(
-    const InferenceEngine::CNNNetwork&);
-
-};
-
-
-
-}  // namespace Models
-
-#endif // DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp
deleted file mode 100644
index b3e19a52..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
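ModelAttribute is pure bookkeeping: a derived model class fills it in while parsing the network, and the rest of the pipeline reads it back. A short usage sketch of the API deleted above (the model name, layer names, and dimensions below are illustrative, not taken from the toolkit):

```cpp
#include <string>
#include "dynamic_vino_lib/models/attributes/base_attribute.hpp"

void attributeExample()
{
  Models::ModelAttribute attr("face-detection-adas-0001");  // illustrative name
  attr.addInputInfo("input", "data");              // logical key -> layer name
  attr.addOutputInfo("output", "detection_out");
  attr.setInputWidth(672);                         // illustrative dimensions
  attr.setInputHeight(384);
  attr.setMaxProposalCount(200);
  attr.setObjectSize(7);                           // e.g. SSD: 7 floats per proposal
  attr.loadLabelsFromFile("/opt/openvino_toolkit/models/face-detection.labels");

  attr.printAttribute();                           // dumps the table shown above
  if (attr.isVerified()) {                         // true once nothing is 0 or empty
    std::string input_layer = attr.getInputName();    // "data"
    std::string output_layer = attr.getOutputName();  // "detection_out"
  }
}
```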
-
-/**
- * @brief A header file with declaration for BaseModel Class
- * @file base_model.hpp
- */
-
-#ifndef DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_
-
-#include <opencv2/opencv.hpp>
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "inference_engine.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#include "dynamic_vino_lib/models/attributes/base_attribute.hpp"
-
-namespace Engines
-{
-  class Engine;
-}
-
-namespace dynamic_vino_lib
-{
-  class ObjectDetectionResult;
-}
-
-namespace Models
-{
-  /**
-   * @class BaseModel
-   * @brief This class represents the network given by .xml and .bin file
-   */
-  class BaseModel : public ModelAttribute
-  {
-  public:
-    using Ptr = std::shared_ptr<BaseModel>;
-    /**
-     * @brief Initialize the class with given .xml, .bin and .labels file. It will
-     * also check whether the number of input and output are fit.
-     * @param[in] label_loc The location of the model's .labels file.
-     * @param[in] model_loc The location of the model's .xml file
-     * (the model's .bin file should be the same as the .xml file except for extension)
-     * @param[in] batch_size The number of batch size (default: 1) the network should have.
-     */
-    BaseModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
-
-    /**
-     * @brief Get the maximum batch size of the model.
-     * @return The maximum batch size of the model.
-     */
-    inline int getMaxBatchSize() const
-    {
-      return max_batch_size_;
-    }
-    inline void setMaxBatchSize(int max_batch_size)
-    {
-      max_batch_size_ = max_batch_size;
-    }
-
-    virtual bool enqueue(
-      const std::shared_ptr<Engines::Engine> &engine,
-      const cv::Mat &frame,
-      const cv::Rect &input_frame_loc) { return true; }
-    /**
-     * @brief Initialize the model. During the process the class will check
-     * the network input, output size, check layer property and
-     * set layer property.
-     */
-    void modelInit();
-    /**
-     * @brief Get the name of the model.
-     * @return The name of the model.
-     */
-    virtual const std::string getModelCategory() const = 0;
-    inline ModelAttr getAttribute() { return attr_; }
-
-    inline InferenceEngine::CNNNetwork getNetReader() const
-    {
-      return net_reader_;
-    }
-
-  protected:
-    /**
-     * New interface to check and update Layer Property
-     * @brief Set the layer property (layer layout, layer precision, etc.).
-     * @param[in] network_reader The reader of the network to be set.
-     */
-    virtual bool updateLayerProperty(InferenceEngine::CNNNetwork& network_reader) = 0;
-
-    ///InferenceEngine::CNNNetReader::Ptr net_reader_;
-    InferenceEngine::Core engine;
-    InferenceEngine::CNNNetwork net_reader_;  // = engine.ReadNetwork(model->getModelFileName());
-    void setFrameSize(const int &w, const int &h)
-    {
-      frame_size_.width = w;
-      frame_size_.height = h;
-    }
-    cv::Size getFrameSize()
-    {
-      return frame_size_;
-    }
-
-  private:
-    int max_batch_size_;
-    std::string model_loc_;
-    std::string label_loc_;
-    cv::Size frame_size_;
-  };
-
-  class ObjectDetectionModel : public BaseModel
-  {
-  public:
-    ObjectDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
-    virtual bool fetchResults(
-      const std::shared_ptr<Engines::Engine> &engine,
-      std::vector<dynamic_vino_lib::ObjectDetectionResult> &result,
-      const float &confidence_thresh = 0.3,
-      const bool &enable_roi_constraint = false) = 0;
-    virtual bool matToBlob(
-      const cv::Mat &orig_image, const cv::Rect &, float scale_factor,
-      int batch_index, const std::shared_ptr<Engines::Engine> &engine) = 0;
-  };
-
-}  // namespace Models
-
-#endif // DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp
deleted file mode 100644
index efbe17e9..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/**
- * @brief A header file with declaration for ObjectDetectionYolov2Model Class
- * @file object_detection_yolov2_model.hpp
- */
-#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_
-#include <memory>
-#include <string>
-#include <vector>
-#include "dynamic_vino_lib/models/base_model.hpp"
-namespace Models
-{
-/**
- * @class ObjectDetectionYolov2Model
- * @brief This class generates the YOLOv2 object detection model.
- */
-class ObjectDetectionYolov2Model : public ObjectDetectionModel
-{
-  using Result = dynamic_vino_lib::ObjectDetectionResult;
-
-public:
-  ObjectDetectionYolov2Model(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
-
-  bool fetchResults(
-    const std::shared_ptr<Engines::Engine> & engine,
-    std::vector<Result> & results,
-    const float & confidence_thresh = 0.3,
-    const bool & enable_roi_constraint = false) override;
-
-  bool enqueue(
-    const std::shared_ptr<Engines::Engine> & engine,
-    const cv::Mat & frame,
-    const cv::Rect & input_frame_loc) override;
-
-  bool matToBlob(
-    const cv::Mat & orig_image, const cv::Rect &, float scale_factor,
-    int batch_index, const std::shared_ptr<Engines::Engine> & engine) override;
-
-  /**
-   * @brief Get the name of this detection model.
-   * @return Name of the model.
- */
-  const std::string getModelCategory() const override;
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
-
-protected:
-  int getEntryIndex(int side, int lcoords, int lclasses, int location, int entry);
-  InferenceEngine::InputInfo::Ptr input_info_ = nullptr;
-};
-}  // namespace Models
-#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp
deleted file mode 100644
index 7d25944c..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @brief A header file with declaration for BaseOutput Class
- * @file base_output.hpp
- */
-
-#ifndef DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_
-#define DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "dynamic_vino_lib/inferences/age_gender_detection.hpp"
-#include "dynamic_vino_lib/inferences/base_inference.hpp"
-#include "dynamic_vino_lib/inferences/emotions_detection.hpp"
-#include "dynamic_vino_lib/inferences/face_detection.hpp"
-#include "dynamic_vino_lib/inferences/head_pose_detection.hpp"
-#include "dynamic_vino_lib/inferences/object_detection.hpp"
-#include "dynamic_vino_lib/inferences/object_segmentation.hpp"
-#include "dynamic_vino_lib/inferences/person_reidentification.hpp"
-#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp"
-#include "dynamic_vino_lib/inferences/landmarks_detection.hpp"
-#include "dynamic_vino_lib/inferences/face_reidentification.hpp"
-#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp"
-#include "dynamic_vino_lib/inferences/license_plate_detection.hpp"
-#include "opencv2/opencv.hpp"
-
-class Pipeline;
-namespace Outputs
-{
-/**
- * @class BaseOutput
- * @brief This class is a base class for various output devices. It employs
- * the visitor pattern to dispatch each type of inference result to the
- * matching operation on the chosen output device.
- */
-class BaseOutput
-{
-public:
-  explicit BaseOutput(std::string output_name)
-  : output_name_(output_name) {}
-  /**
-   * @brief Generate output content according to the license plate detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::LicensePlateDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the vehicle attributes detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::VehicleAttribsDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the face reidentification result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::FaceReidentificationResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the landmarks detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::LandmarksDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the person attributes detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::PersonAttribsDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the person reidentification result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::PersonReidentificationResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the object segmentation result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::ObjectSegmentationResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the object detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::ObjectDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the face detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::FaceDetectionResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the emotion detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::EmotionsResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the age and gender detection
-   * result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::AgeGenderResult> &)
-  {
-  }
-  /**
-   * @brief Generate output content according to the headpose detection result.
-   */
-  virtual void accept(const std::vector<dynamic_vino_lib::HeadPoseResult> &)
-  {
-  }
-  /**
-   * @brief Calculate the camera matrix of a frame for image window output; no
-   * implementation for ROS topic output.
-   */
-  virtual void feedFrame(const cv::Mat &)
-  {
-  }
-  /**
-   * @brief Show all the contents generated by the accept functions.
-   */
-  virtual void handleOutput() = 0;
-
-  void setPipeline(Pipeline * const pipeline);
-  virtual void setServiceResponse(
-    std::shared_ptr response) {}
-  virtual void setServiceResponseForFace(
-    std::shared_ptr response) {}
-  virtual void setServiceResponse(
-    std::shared_ptr response) {}
-  virtual void setServiceResponse(
-    std::shared_ptr response) {}
-  virtual void setServiceResponse(
-    std::shared_ptr response) {}
-  virtual void setServiceResponse(
-    std::shared_ptr response) {}
-  Pipeline * getPipeline() const;
-  cv::Mat getFrame() const;
-  virtual void clearData() {}
-
-protected:
-  cv::Mat frame_;
-  Pipeline * pipeline_;
-  std::string output_name_;
-};
-}  // namespace Outputs
-#endif // DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp
deleted file mode 100644
index c102e44e..00000000
--- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) 2018 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
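Because every accept() overload has an empty default body, a concrete output device only overrides the result types it cares about. A minimal, hypothetical sketch of such a device (assuming the reconstructed accept() signatures above; `ConsoleOutput` is not part of the toolkit):

```cpp
#include <string>
#include <vector>
#include "dynamic_vino_lib/outputs/base_output.hpp"
#include "dynamic_vino_lib/slog.hpp"

class ConsoleOutput : public Outputs::BaseOutput
{
public:
  explicit ConsoleOutput(std::string output_name)
  : Outputs::BaseOutput(output_name) {}

  // Only the object-detection visitor is overridden; every other result
  // type falls through to the empty base implementation.
  void accept(const std::vector<dynamic_vino_lib::ObjectDetectionResult> & results) override
  {
    count_ = results.size();
  }

  void feedFrame(const cv::Mat & frame) override
  {
    frame_ = frame.clone();  // frame_ is a protected member of BaseOutput
  }

  void handleOutput() override
  {
    slog::info << output_name_ << ": " << count_ << " objects in "
      << frame_.cols << "x" << frame_.rows << " frame" << slog::endl;
    count_ = 0;
  }

private:
  size_t count_ = 0;
};
```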
- -/** - * @brief A header file with declaration for RosTopicOutput Class - * @file ros_topic_output.hpp - */ - -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" - -namespace Outputs -{ -/** - * @class RosTopicOutput - * @brief This class handles and publish the detection result with ros topic. - */ -class RosTopicOutput : public BaseOutput -{ -public: - explicit RosTopicOutput(std::string output_name_, - const rclcpp::Node::SharedPtr node=nullptr); - /** - * @brief Calculate the camera matrix of a frame. - * @param[in] A frame. - */ - void feedFrame(const cv::Mat &) override; - /** - * @brief Publish all the detected infomations generated by the accept - * functions with ros topic. - */ - void handleOutput() override; - /** - * @brief Generate ros topic infomation according to - * the license plate detection result. - * @param[in] results a bundle of license plate detection results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the vehicle attributes detection result. - * @param[in] results a bundle of vehicle attributes detection results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the face reidentification result. - * @param[in] results a bundle of face reidentification results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the landmarks detection result. - * @param[in] results a bundle of landmarks detection results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the person attributes detection result. - * @param[in] results a bundle of person attributes detection results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the person reidentification result. - * @param[in] results a bundle of person reidentification results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the object segmentation result. - * @param[in] results a bundle of object segmentation results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the object detection result. - * @param[in] results a bundle of object detection results. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the face detection result. - * @param[in] An face detection result objetc. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the emotion detection result. - * @param[in] An emotion detection result objetc. - */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the age gender detection result. - * @param[in] An age gender detection result objetc. 
- */ - void accept(const std::vector &) override; - /** - * @brief Generate ros topic infomation according to - * the headpose detection result. - * @param[in] An head pose detection result objetc. - */ - void accept(const std::vector &) override; - -protected: - const std::string topic_name_; - std::shared_ptr node_; - rclcpp::Publisher::SharedPtr pub_license_plate_; - std::shared_ptr license_plate_topic_; - rclcpp::Publisher::SharedPtr pub_vehicle_attribs_; - std::shared_ptr vehicle_attribs_topic_; - rclcpp::Publisher::SharedPtr pub_landmarks_; - std::shared_ptr landmarks_topic_; - rclcpp::Publisher::SharedPtr pub_face_reid_; - std::shared_ptr face_reid_topic_; - rclcpp::Publisher::SharedPtr pub_person_attribs_; - std::shared_ptr person_attribs_topic_; - rclcpp::Publisher::SharedPtr pub_person_reid_; - std::shared_ptr person_reid_topic_; - rclcpp::Publisher::SharedPtr pub_segmented_object_; - std::shared_ptr segmented_objects_topic_; - rclcpp::Publisher::SharedPtr pub_detected_object_; - std::shared_ptr detected_objects_topic_; - rclcpp::Publisher::SharedPtr pub_face_; - std::shared_ptr faces_topic_; - rclcpp::Publisher::SharedPtr pub_emotion_; - std::shared_ptr emotions_topic_; - rclcpp::Publisher::SharedPtr pub_age_gender_; - std::shared_ptr age_gender_topic_; - rclcpp::Publisher::SharedPtr pub_headpose_; - std::shared_ptr headpose_topic_; -}; -} // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp deleted file mode 100644 index e9790327..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with logging facility for common samples - * @file slog.hpp - */ -#ifndef DYNAMIC_VINO_LIB__SLOG_HPP_ -#define DYNAMIC_VINO_LIB__SLOG_HPP_ - -#pragma once - -#include -#include - -namespace slog -{ -#if 1 - enum COLOR { - RESET = 0, - BLUE = 1, - GREEN = 2, - YELLOW = 3, - RED = 4, - }; - -#else -//the following are UBUNTU/LINUX ONLY terminal color codes. 
-#define RESET "\033[0m" -#define BLACK "\033[30m" /* Black */ -#define RED "\033[31m" /* Red */ -#define GREEN "\033[32m" /* Green */ -#define YELLOW "\033[33m" /* Yellow */ -#define BLUE "\033[34m" /* Blue */ -#define MAGENTA "\033[35m" /* Magenta */ -#define CYAN "\033[36m" /* Cyan */ -#define WHITE "\033[37m" /* White */ -#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ -#define BOLDRED "\033[1m\033[31m" /* Bold Red */ -#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ -#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ -#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ -#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ -#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ -#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ -#endif - -/** - * @class LogStreamEndLine - * @brief The LogStreamEndLine class implements an end line marker for a log - * stream - */ -class LogStreamEndLine -{ -}; - -static constexpr LogStreamEndLine endl; - -/** - * @class LogStream - * @brief The LogStream class implements a stream for sample logging - */ -class LogStream -{ - std::string _prefix; - std::ostream * _log_stream; - bool _new_line; - int _color_id; - -public: - /** - * @brief A constructor. Creates an LogStream object - * @param prefix The prefix to print - */ - LogStream(const std::string & prefix, std::ostream & log_stream, - const int color_id = -1) - : _prefix(prefix), _new_line(true), _color_id(color_id) - { - _log_stream = &log_stream; - } - - /** - * @brief A stream output operator to be used within the logger - * @param arg Object for serialization in the logger message - */ - template - LogStream & operator<<(const T & arg) - { - if (_new_line) { - setLineColor(); - (*_log_stream) << "[ " << _prefix << " ] "; - _new_line = false; - } - - (*_log_stream) << arg; - return *this; - } - - // Specializing for LogStreamEndLine to support slog::endl - LogStream & operator<<(const LogStreamEndLine & arg) - { - _new_line = true; - resetLineColor(); - (*_log_stream) << std::endl; - return *this; - } - - void setLineColor() - { - switch(_color_id){ - case BLUE: - (*_log_stream) << "\033[34m"; - break; - case GREEN: - (*_log_stream) << "\033[32m"; - break; - case YELLOW: - (*_log_stream) << "\033[33m"; - break; - case RED: - (*_log_stream) << "\033[31m"; - break; - default: - break; - } - } - - void resetLineColor() - { - if(_color_id > 0){ - (*_log_stream) << "\033[0m"; //RESET - } - } -}; - -class NullStream -{ -public: - NullStream(){} - - NullStream(const std::string & prefix, std::ostream & log_stream) - { - (void)prefix; - (void)log_stream; - } - - template - NullStream & operator<<(const T & arg) - { - return *this; - } -}; - -#ifdef LOG_LEVEL_DEBUG - static LogStream debug("DEBUG", std::cout, GREEN); -#else - static NullStream debug; -#endif -static LogStream info("INFO", std::cout, BLUE); -static LogStream warn("WARNING", std::cout, YELLOW); -static LogStream err("ERROR", std::cerr, RED); - -} // namespace slog -#endif // DYNAMIC_VINO_LIB__SLOG_HPP_ diff --git a/dynamic_vino_lib/src/engines/engine_manager.cpp b/dynamic_vino_lib/src/engines/engine_manager.cpp deleted file mode 100644 index ed0e3efb..00000000 --- a/dynamic_vino_lib/src/engines/engine_manager.cpp +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2018-2019 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
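The slog facility above is self-contained, so its behavior is easy to pin down with a short usage sketch: each stream stamps a colored prefix on the first insertion of a line, and slog::endl resets the color and line state.

```cpp
#include "dynamic_vino_lib/slog.hpp"

int main()
{
  slog::info << "pipeline " << 1 << " started" << slog::endl;       // blue [ INFO ]
  slog::warn << "label file missing, using raw ids" << slog::endl;  // yellow [ WARNING ]
  slog::err << "inference device not found" << slog::endl;          // red [ ERROR ], to stderr
  // Compiles to a NullStream (prints nothing) unless LOG_LEVEL_DEBUG is defined:
  slog::debug << "verbose state dump" << slog::endl;
  return 0;
}
```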
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with definition of Engine class - * @file engine.cpp - */ -#include "dynamic_vino_lib/engines/engine_manager.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/utils/version_info.hpp" -#include -#include -#if(defined(USE_OLD_E_PLUGIN_API)) -#include -#endif - -std::shared_ptr Engines::EngineManager::createEngine( - const std::string & device, const std::shared_ptr & model) -{ -#if(defined(USE_OLD_E_PLUGIN_API)) - return createEngine_beforeV2019R2(device, model); -#else - return createEngine_V2019R2_plus(device, model); -#endif -} - -std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( - const std::string & device, const std::shared_ptr & model) -{ - InferenceEngine::Core core; - auto executable_network = core.LoadNetwork(model->getNetReader(), device); - auto request = executable_network.CreateInferRequestPtr(); - - return std::make_shared(request); -} - -#if(defined(USE_OLD_E_PLUGIN_API)) -std::shared_ptr Engines::EngineManager::createEngine_beforeV2019R2( - const std::string & device, const std::shared_ptr & model) -{ - if(plugins_for_devices_.find(device) == plugins_for_devices_.end()) { - auto pcommon = Params::ParamManager::getInstance().getCommon(); - plugins_for_devices_[device] = *makePluginByName(device, pcommon.custom_cpu_library, - pcommon.custom_cldnn_library, pcommon.enable_performance_count); - slog::info << "Created plugin for " << device << slog::endl; - } - - auto executeable_network = - plugins_for_devices_[device].LoadNetwork(model->getNetReader()->getNetwork(), {}); - auto request = executeable_network.CreateInferRequestPtr(); - - return std::make_shared(request); -} - -std::unique_ptr -Engines::EngineManager::makePluginByName( - const std::string & device_name, const std::string & custom_cpu_library_message, - const std::string & custom_cldnn_message, bool performance_message) -{ - slog::info << "Creating plugin for " << device_name << slog::endl; - - InferenceEngine::InferencePlugin plugin = - InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}) - .getPluginByDevice(device_name); - - /** Printing plugin version **/ - printPluginVersion(plugin, std::cout); - - /** Load extensions for the CPU plugin **/ - if ((device_name.find("CPU") != std::string::npos)) { - plugin.AddExtension(std::make_shared()); - if (!custom_cpu_library_message.empty()) { - slog::info << "custom cpu library is not empty, tyring to use this extension:" - << custom_cpu_library_message << slog::endl; - // CPU(MKLDNN) extensions are loaded as a shared library and passed as a - // pointer to base - // extension - auto extension_ptr = - InferenceEngine::make_so_pointer(custom_cpu_library_message); - plugin.AddExtension(extension_ptr); - } - } else if (!custom_cldnn_message.empty()) { - slog::info << "custom cldnn library is not empty, tyring to use this extension:" - << custom_cldnn_message << slog::endl; - // Load Extensions for other plugins not CPU - plugin.SetConfig( - 
{{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); - } - if (performance_message) { - plugin.SetConfig({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, - InferenceEngine::PluginConfigParams::YES}}); - } - - return std::make_unique( - InferenceEngine::InferenceEnginePluginPtr(plugin)); -} -#endif diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp deleted file mode 100644 index 6cb2bc71..00000000 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of AgeGenderResult class - * @file age_gender_detection.cpp - */ - -#include -#include -#include -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" - -// AgeGenderResult -dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect & location) -: Result(location) -{ -} - -// AgeGender Detection -dynamic_vino_lib::AgeGenderDetection::AgeGenderDetection() -: dynamic_vino_lib::BaseInference() -{ -} - -dynamic_vino_lib::AgeGenderDetection::~AgeGenderDetection() = default; - -void dynamic_vino_lib::AgeGenderDetection::loadNetwork( - std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::AgeGenderDetection::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); - if (!succeed) { - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::AgeGenderDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::AgeGenderDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) { - return false; - } - auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr genderBlob = request->GetBlob(valid_model_->getOutputGenderName()); - InferenceEngine::Blob::Ptr ageBlob = request->GetBlob(valid_model_->getOutputAgeName()); - - for (int i = 0; i < results_.size(); ++i) { - results_[i].age_ = ageBlob->buffer().as()[i] * 100; - results_[i].male_prob_ = genderBlob->buffer().as()[i * 2 + 1]; - } - return true; -} - -int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::AgeGenderDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void 
dynamic_vino_lib::AgeGenderDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::AgeGenderDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Age gender detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp deleted file mode 100644 index ab3313fc..00000000 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of EmotionsDetection class and - * EmotionsResult class - * @file emotions_recognition.cpp - */ - -#include -#include -#include -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// EmotionsResult -dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect & location) -: Result(location) -{ -} - -// Emotions Detection -dynamic_vino_lib::EmotionsDetection::EmotionsDetection() -: dynamic_vino_lib::BaseInference() -{ -} - -dynamic_vino_lib::EmotionsDetection::~EmotionsDetection() = default; - -void dynamic_vino_lib::EmotionsDetection::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::EmotionsDetection::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); - if (!succeed) { - slog::err << "Failed enqueue Emotion frame." << slog::endl; - // TODO(weizhi): throw an error here - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::EmotionsDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::EmotionsDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) { - return false; - } - int label_length = static_cast(valid_model_->getLabels().size()); - std::string output_name = valid_model_->getOutputName(); - InferenceEngine::Blob::Ptr emotions_blob = getEngine()->getRequest()->GetBlob(output_name); - /** emotions vector must have the same size as number of channels - in model output. 
Default output format is NCHW so we check index 1 */ - - int64 num_of_channels = emotions_blob->getTensorDesc().getDims().at(1); - if (num_of_channels != label_length) { - slog::err << "Output size (" << num_of_channels << - ") of the Emotions Recognition network is not equal " << - "to used emotions vector size (" << label_length << ")" << slog::endl; - throw std::logic_error("Output size (" + std::to_string(num_of_channels) + - ") of the Emotions Recognition network is not equal " - "to used emotions vector size (" + - std::to_string(label_length) + ")"); - } - - /** we identify an index of the most probable emotion in output array - for idx image to return appropriate emotion name */ - auto emotions_values = emotions_blob->buffer().as(); - for (int idx = 0; idx < results_.size(); ++idx) { - auto output_idx_pos = emotions_values + label_length * idx; - int64 max_prob_emotion_idx = - std::max_element(output_idx_pos, output_idx_pos + label_length) - output_idx_pos; - results_[idx].label_ = valid_model_->getLabels()[max_prob_emotion_idx]; - } - - return true; -} - -int dynamic_vino_lib::EmotionsDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::EmotionsDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::EmotionsDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::EmotionsDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Emotion detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/dynamic_vino_lib/src/inferences/face_reidentification.cpp deleted file mode 100644 index d50d317f..00000000 --- a/dynamic_vino_lib/src/inferences/face_reidentification.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
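The decode step in EmotionsDetection::fetchResults() above is a per-face arg-max over the channel dimension of the NCHW output. The same logic restated as a standalone sketch (function and parameter names are illustrative, not toolkit API):

```cpp
#include <algorithm>
#include <string>
#include <vector>

// For each of `batch` faces, pick the label whose channel scores highest;
// `output` points at batch * labels.size() floats, laid out row-major.
std::vector<std::string> decodeEmotions(
  const float * output, size_t batch, const std::vector<std::string> & labels)
{
  std::vector<std::string> decoded;
  for (size_t idx = 0; idx < batch; ++idx) {
    const float * row = output + labels.size() * idx;
    size_t best = std::max_element(row, row + labels.size()) - row;
    decoded.push_back(labels[best]);
  }
  return decoded;
}
```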
- -/** - * @brief a header file with declaration of FaceReidentification class and - * FaceReidentificationResult class - * @file face_reidentification.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/face_reidentification.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// FaceReidentificationResult -dynamic_vino_lib::FaceReidentificationResult::FaceReidentificationResult( - const cv::Rect & location) -: Result(location) {} - -// FaceReidentification -dynamic_vino_lib::FaceReidentification::FaceReidentification(double match_thresh) -: dynamic_vino_lib::BaseInference() -{ - face_tracker_ = std::make_shared(1000, match_thresh, 0.3); -} - -dynamic_vino_lib::FaceReidentification::~FaceReidentification() = default; -void dynamic_vino_lib::FaceReidentification::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::FaceReidentification::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::FaceReidentification::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::FaceReidentification::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} - bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); - int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; - for (int i = 0; i < getResultsLength(); i++) { - std::vector new_face = std::vector( - output_values + result_length * i, output_values + result_length * (i + 1)); - std::string face_id = "No." + std::to_string(face_tracker_->processNewTrack(new_face)); - results_[i].face_id_ = face_id; - found_result = true; - } - if (!found_result) {results_.clear();} - return true; -} - -int dynamic_vino_lib::FaceReidentification::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::FaceReidentification::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::FaceReidentification::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::FaceReidentification::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Face reidentification does not support filtering now! 
" << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp deleted file mode 100644 index 0c5b3170..00000000 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of HeadPoseDetection class and - * HeadPoseResult class - * @file head_pose_recognition.cpp - */ - -#include -#include -#include -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" - -// HeadPoseResult -dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect & location) -: Result(location) -{ -} - -// Head Pose Detection -dynamic_vino_lib::HeadPoseDetection::HeadPoseDetection() -: dynamic_vino_lib::BaseInference() -{ -} - -dynamic_vino_lib::HeadPoseDetection::~HeadPoseDetection() = default; - -void dynamic_vino_lib::HeadPoseDetection::loadNetwork( - std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::HeadPoseDetection::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); - if (!succeed) { - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::HeadPoseDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::HeadPoseDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) { - return false; - } - auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr angle_r = request->GetBlob(valid_model_->getOutputOutputAngleR()); - InferenceEngine::Blob::Ptr angle_p = request->GetBlob(valid_model_->getOutputOutputAngleP()); - InferenceEngine::Blob::Ptr angle_y = request->GetBlob(valid_model_->getOutputOutputAngleY()); - - for (int i = 0; i < getResultsLength(); ++i) { - results_[i].angle_r_ = angle_r->buffer().as()[i]; - results_[i].angle_p_ = angle_p->buffer().as()[i]; - results_[i].angle_y_ = angle_y->buffer().as()[i]; - } - return true; -} - -int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::HeadPoseDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void 
dynamic_vino_lib::HeadPoseDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::HeadPoseDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Headpose detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp deleted file mode 100644 index bb42f285..00000000 --- a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a realization file with declaration of LicensePlateDetection class and - * LicensePlateDetectionResult class - * @file license_plate_detection.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/license_plate_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// LicensePlateDetectionResult -dynamic_vino_lib::LicensePlateDetectionResult::LicensePlateDetectionResult( - const cv::Rect & location) -: Result(location) {} - -// LicensePlateDetection -dynamic_vino_lib::LicensePlateDetection::LicensePlateDetection() -: dynamic_vino_lib::BaseInference() {} - -dynamic_vino_lib::LicensePlateDetection::~LicensePlateDetection() = default; -void dynamic_vino_lib::LicensePlateDetection::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -void dynamic_vino_lib::LicensePlateDetection::fillSeqBlob() -{ - InferenceEngine::Blob::Ptr seq_blob = getEngine()->getRequest()->GetBlob( - valid_model_->getSeqInputName()); - int max_sequence_size = seq_blob->getTensorDesc().getDims()[0]; - // second input is sequence, which is some relic from the training - // it should have the leading 0.0f and rest 1.0f - float * blob_data = seq_blob->buffer().as(); - blob_data[0] = 0.0f; - std::fill(blob_data + 1, blob_data + max_sequence_size, 1.0f); -} - -bool dynamic_vino_lib::LicensePlateDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - fillSeqBlob(); - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::LicensePlateDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::LicensePlateDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if 
(!can_fetch) {return false;} - bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); - for (int i = 0; i < getResultsLength(); i++) { - std::string license = ""; - int max_size = valid_model_->getMaxSequenceSize(); - for (int j = 0; j < max_size; j++) { - if (output_values[i * max_size + j] == -1) { - break; - } - license += licenses_[output_values[i * max_size + j]]; - } - results_[i].license_ = license; - found_result = true; - } - if (!found_result) {results_.clear();} - return true; -} - -int dynamic_vino_lib::LicensePlateDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::LicensePlateDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::LicensePlateDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::LicensePlateDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "License plate detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp deleted file mode 100644 index e4772c06..00000000 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
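The decoding loop in LicensePlateDetection::fetchResults() above walks each output row until a -1 terminator, mapping every index into the character table. Isolated as a sketch (names are illustrative):

```cpp
#include <string>
#include <vector>

// `row` holds max_sequence_size character indices for one plate; -1 ends
// the sequence early. `characters` is the digits/regions/letters table.
std::string decodePlate(
  const float * row, int max_sequence_size, const std::vector<std::string> & characters)
{
  std::string license;
  for (int j = 0; j < max_sequence_size; j++) {
    int index = static_cast<int>(row[j]);
    if (index == -1) {
      break;
    }
    license += characters[index];
  }
  return license;
}
```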
- -/** - * @brief a header file with declaration of ObjectSegmentation class and - * ObjectSegmentationResult class - * @file object_segmentation.cpp - */ -#include -#include -#include -#include -#include - -#include "dynamic_vino_lib/inferences/object_segmentation.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// ObjectSegmentationResult -dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect &location) - : Result(location) -{ -} - -// ObjectSegmentation -dynamic_vino_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) - : show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() -{ -} - -dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; - -void dynamic_vino_lib::ObjectSegmentation::loadNetwork( - const std::shared_ptr network) -{ - slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -/** - * Deprecated! - * This function only support OpenVINO version <=2018R5 - */ -bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( - const cv::Mat &frame, - const cv::Rect &input_frame_loc) -{ - if (width_ == 0 && height_ == 0) - { - width_ = frame.cols; - height_ = frame.rows; - } - if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, - valid_model_->getInputName())) - { - return false; - } - Result r(input_frame_loc); - results_.clear(); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::ObjectSegmentation::enqueue( - const cv::Mat &frame, - const cv::Rect &input_frame_loc) -{ - if (width_ == 0 && height_ == 0) - { - width_ = frame.cols; - height_ = frame.rows; - } - - if (valid_model_ == nullptr || getEngine() == nullptr) - { - throw std::logic_error("Model or Engine is not set correctly!"); - return false; - } - - if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) - { - slog::warn << "Number of " << getName() << "input more than maximum(" << - max_batch_size_ << ") processed by inference" << slog::endl; - return false; - } - - if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) - { - return false; - } - - enqueued_frames_ += 1; - return true; -} - -bool dynamic_vino_lib::ObjectSegmentation::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::ObjectSegmentation::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) - { - return false; - } - bool found_result = false; - results_.clear(); - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - slog::debug << "Analyzing Detection results..." 
<< slog::endl; - std::string detection_output = valid_model_->getOutputName("detection"); - std::string mask_output = valid_model_->getOutputName("masks"); - - const InferenceEngine::Blob::Ptr do_blob = request->GetBlob(detection_output.c_str()); - const auto do_data = do_blob->buffer().as(); - const auto masks_blob = request->GetBlob(mask_output.c_str()); - const auto masks_data = masks_blob->buffer().as(); - const size_t output_w = masks_blob->getTensorDesc().getDims().at(3); - const size_t output_h = masks_blob->getTensorDesc().getDims().at(2); - const size_t output_des = masks_blob-> getTensorDesc().getDims().at(1); - const size_t output_extra = masks_blob-> getTensorDesc().getDims().at(0); - - slog::debug << "output w " << output_w<< slog::endl; - slog::debug << "output h " << output_h << slog::endl; - slog::debug << "output description " << output_des << slog::endl; - slog::debug << "output extra " << output_extra << slog::endl; - - const float * detections = request->GetBlob(detection_output)->buffer().as(); - std::vector &labels = valid_model_->getLabels(); - slog::debug << "label size " <(detections[rowId * output_w + colId]); - for (int ch = 0; ch < colored_mask.channels();++ch){ - colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; - } - //classId = static_cast(predictions[rowId * output_w + colId]); - } else { - for (int chId = 0; chId < output_des; ++chId) - { - float prob = detections[chId * output_h * output_w + rowId * output_w+ colId]; - //float prob = predictions[chId * output_h * output_w + rowId * output_w+ colId]; - if (prob > maxProb) - { - classId = chId; - maxProb = prob; - } - } - while (classId >= colors_.size()) - { - static std::mt19937 rng(classId); - std::uniform_int_distribution distr(0, 255); - cv::Vec3b color(distr(rng), distr(rng), distr(rng)); - colors_.push_back(color); - } - if(maxProb > 0.5){ - for (int ch = 0; ch < colored_mask.channels();++ch){ - colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; - } - } - } - } - } - const float alpha = 0.7f; - Result result(roi); - result.mask_ = colored_mask; - found_result = true; - results_.emplace_back(result); - return true; -} - -int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::ObjectSegmentation::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::ObjectSegmentation::observeOutput( - const std::shared_ptr &output) -{ - if (output != nullptr) - { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) - { - slog::err << "Object segmentation does not support filtering now! 
" - << "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) - { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp deleted file mode 100644 index 3831c70a..00000000 --- a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of PersonAttribsDetection class and - * PersonAttribsDetectionResult class - * @file person_attribs_detection.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// PersonAttribsDetectionResult -dynamic_vino_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult( - const cv::Rect & location) -: Result(location) {} - -// PersonAttribsDetection -dynamic_vino_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) -: attribs_confidence_(attribs_confidence), dynamic_vino_lib::BaseInference() {} - -dynamic_vino_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; -void dynamic_vino_lib::PersonAttribsDetection::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::PersonAttribsDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::PersonAttribsDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} - bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - slog::debug << "Analyzing Attributes Detection results..." 
<< slog::endl; - std::string attribute_output = valid_model_->getOutputName("attributes_output_"); - std::string top_output = valid_model_->getOutputName("top_output_"); - std::string bottom_output = valid_model_->getOutputName("bottom_output_"); - - /*auto attri_values = request->GetBlob(attribute_output)->buffer().as(); - auto top_values = request->GetBlob(top_output)->buffer().as(); - auto bottom_values = request->GetBlob(bottom_output)->buffer().as();*/ - InferenceEngine::Blob::Ptr attribBlob = request->GetBlob(attribute_output); - InferenceEngine::Blob::Ptr topBlob = request->GetBlob(top_output); - InferenceEngine::Blob::Ptr bottomBlob = request->GetBlob(bottom_output); - - auto attri_values = attribBlob->buffer().as(); - auto top_values = topBlob->buffer().as(); - auto bottom_values = bottomBlob->buffer().as(); - - int net_attrib_length = net_attributes_.size(); - for (int i = 0; i < getResultsLength(); i++) { - results_[i].male_probability_ = attri_values[i * net_attrib_length]; - results_[i].top_point_.x = top_values[i]; - results_[i].top_point_.y = top_values[i+1]; - results_[i].bottom_point_.x = bottom_values[i]; - results_[i].bottom_point_.y = bottom_values[i+1]; - std::string attrib = ""; - for (int j = 1; j < net_attrib_length; j++) { - attrib += (attri_values[i * net_attrib_length + j] > attribs_confidence_) ? - net_attributes_[j] + ", " : ""; - } - results_[i].attributes_ = attrib; - - found_result = true; - } - if (!found_result) {results_.clear();} - return true; -} - -int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::PersonAttribsDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::PersonAttribsDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Person attributes detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/dynamic_vino_lib/src/inferences/person_reidentification.cpp deleted file mode 100644 index 34280bd1..00000000 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
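// ---------------------------------------------------------------------------
// Editor's note: a minimal, standalone sketch (not part of this diff) of how
// the deleted PersonAttribsDetection::fetchResults() above decodes one row of
// the "attributes" blob per enqueued ROI. The function name decodeAttributes
// and its parameters are illustrative assumptions, not toolkit API.
// ---------------------------------------------------------------------------
#include <string>
#include <vector>

std::string decodeAttributes(const float * attri_values, int roi_index,
                             const std::vector<std::string> & attributes,
                             float confidence)
{
  const int n = static_cast<int>(attributes.size());
  // Entry 0 of each row is treated as the male probability; entries 1..n-1
  // are per-attribute probabilities, thresholded and concatenated exactly as
  // in the deleted loop above.
  std::string result;
  for (int j = 1; j < n; j++) {
    if (attri_values[roi_index * n + j] > confidence) {
      result += attributes[j] + ", ";
    }
  }
  return result;
}
// Usage: decodeAttributes(blob_data, i, net_attributes, attribs_confidence_)
// reproduces results_[i].attributes_ for the i-th detected person.
// ---------------------------------------------------------------------------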
-
-/**
- * @brief a header file with declaration of PersonReidentification class and
- * PersonReidentificationResult class
- * @file person_reidentification.cpp
- */
-#include <memory>
-#include <string>
-#include <vector>
-#include "dynamic_vino_lib/inferences/person_reidentification.hpp"
-#include "dynamic_vino_lib/outputs/base_output.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-
-// PersonReidentificationResult
-dynamic_vino_lib::PersonReidentificationResult::PersonReidentificationResult(
-  const cv::Rect & location)
-: Result(location) {}
-
-// PersonReidentification
-dynamic_vino_lib::PersonReidentification::PersonReidentification(double match_thresh)
-: dynamic_vino_lib::BaseInference()
-{
-  person_tracker_ = std::make_shared<dynamic_vino_lib::Tracker>(1000, match_thresh, 0.3);
-}
-
-dynamic_vino_lib::PersonReidentification::~PersonReidentification() = default;
-void dynamic_vino_lib::PersonReidentification::loadNetwork(
-  const std::shared_ptr<Models::PersonReidentificationModel> network)
-{
-  valid_model_ = network;
-  setMaxBatchSize(network->getMaxBatchSize());
-}
-
-bool dynamic_vino_lib::PersonReidentification::enqueue(
-  const cv::Mat & frame, const cv::Rect & input_frame_loc)
-{
-  if (getEnqueuedNum() == 0) {
-    results_.clear();
-  }
-  if (!dynamic_vino_lib::BaseInference::enqueue(
-      frame, input_frame_loc, 1, 0, valid_model_->getInputName()))
-  {
-    return false;
-  }
-  Result r(input_frame_loc);
-  results_.emplace_back(r);
-  return true;
-}
-
-bool dynamic_vino_lib::PersonReidentification::submitRequest()
-{
-  return dynamic_vino_lib::BaseInference::submitRequest();
-}
-
-bool dynamic_vino_lib::PersonReidentification::fetchResults()
-{
-  bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults();
-  if (!can_fetch) {return false;}
-  bool found_result = false;
-  InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest();
-  std::string output = valid_model_->getOutputName();
-  const float * output_values = request->GetBlob(output)->buffer().as<float *>();
-  for (int i = 0; i < getResultsLength(); i++) {
-    std::vector<float> new_person = std::vector<float>(
-      output_values + 256 * i, output_values + 256 * i + 256);
-    std::string person_id = "No." + std::to_string(
-      person_tracker_->processNewTrack(new_person));
-    results_[i].person_id_ = person_id;
-    found_result = true;
-  }
-  if (!found_result) {results_.clear();}
-  return true;
-}
-
-int dynamic_vino_lib::PersonReidentification::getResultsLength() const
-{
-  return static_cast<int>(results_.size());
-}
-
-const dynamic_vino_lib::Result *
-dynamic_vino_lib::PersonReidentification::getLocationResult(int idx) const
-{
-  return &(results_[idx]);
-}
-
-const std::string dynamic_vino_lib::PersonReidentification::getName() const
-{
-  return valid_model_->getModelCategory();
-}
-
-void dynamic_vino_lib::PersonReidentification::observeOutput(
-  const std::shared_ptr<Outputs::BaseOutput> & output)
-{
-  if (output != nullptr) {
-    output->accept(results_);
-  }
-}
-
-const std::vector<cv::Rect> dynamic_vino_lib::PersonReidentification::getFilteredROIs(
-  const std::string filter_conditions) const
-{
-  if (!filter_conditions.empty()) {
-    slog::err << "Person reidentification does not support filtering now! 
" << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp deleted file mode 100644 index 14cf4e3e..00000000 --- a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a realization file with declaration of VehicleAttribsDetection class and - * VehicleAttribsDetectionResult class - * @file vehicle_attribs_detection.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// VehicleAttribsDetectionResult -dynamic_vino_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult( - const cv::Rect & location) -: Result(location) {} - -// VehicleAttribsDetection -dynamic_vino_lib::VehicleAttribsDetection::VehicleAttribsDetection() -: dynamic_vino_lib::BaseInference() {} - -dynamic_vino_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; -void dynamic_vino_lib::VehicleAttribsDetection::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::VehicleAttribsDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) -{ - if (getEnqueuedNum() == 0) { - results_.clear(); - } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - Result r(input_frame_loc); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::VehicleAttribsDetection::submitRequest() -{ - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} - bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - //std::string color_name = valid_model_->getColorOutputName(); - //std::string type_name = valid_model_->getTypeOutputName(); - std::string color_name = valid_model_->getOutputName("color_output_"); - std::string type_name = valid_model_->getOutputName("type_output_"); - const float * color_values = request->GetBlob(color_name)->buffer().as(); - const float * type_values = request->GetBlob(type_name)->buffer().as(); - for (int i = 0; i < getResultsLength(); i++) { - auto color_id = std::max_element(color_values, color_values + 7) - color_values; - auto type_id = std::max_element(type_values, type_values + 4) - type_values; - color_values += 7; - type_values += 4; - results_[i].color_ = colors_[color_id]; - 
results_[i].type_ = types_[type_id]; - found_result = true; - } - if (!found_result) {results_.clear();} - return true; -} - -int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result * -dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const -{ - return valid_model_->getModelCategory(); -} - -void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( - const std::shared_ptr & output) -{ - if (output != nullptr) { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::VehicleAttribsDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Vehicle attributes detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} diff --git a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp deleted file mode 100644 index fa7e6f2d..00000000 --- a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
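// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch (not part of this diff) of the argmax
// decoding used by the deleted VehicleAttribsDetection::fetchResults() above.
// The color/type label tables are assumptions for illustration (they match
// the Open Model Zoo vehicle-attributes model's documented classes); the
// toolkit keeps its own tables in colors_ and types_.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <string>
#include <vector>

struct VehicleAttribs { std::string color; std::string type; };

// color_values/type_values advance by 7 and 4 floats per detected vehicle,
// exactly as in the deleted loop above; argmax picks the winning class.
VehicleAttribs decodeVehicle(const float * color_values, const float * type_values)
{
  static const std::vector<std::string> colors =
    {"white", "gray", "yellow", "red", "green", "blue", "black"};
  static const std::vector<std::string> types =
    {"car", "bus", "truck", "van"};
  auto color_id = std::max_element(color_values, color_values + 7) - color_values;
  auto type_id = std::max_element(type_values, type_values + 4) - type_values;
  return {colors[color_id], types[type_id]};
}
// ---------------------------------------------------------------------------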
- -/** - * @brief a header file with declaration of AgeGenderDetectionModel class - * @file age_gender_detection_model.cpp - */ -#include -#include - -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// Validated Age Gender Classification Network -Models::AgeGenderDetectionModel::AgeGenderDetectionModel( - const std::string & label_loc, - const std::string & model_loc, - int max_batch_size) -: BaseModel(label_loc,model_loc, max_batch_size) -{ -} -bool Models::AgeGenderDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; - return false; - } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 2) { - // throw std::logic_error("Age/Gender Recognition network should have two output layers"); - slog::warn << "This model seems not Age-gender like, which should have and only have 2" - " outputs, but we got " << std::to_string(output_info_map.size()) << "outputs" - << slog::endl; - return false; - } - auto it = output_info_map.begin(); - InferenceEngine::DataPtr age_output_ptr = (it++)->second; - InferenceEngine::DataPtr gender_output_ptr = (it++)->second; - -#if(0) /// - //Check More Configuration: - if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") { - std::swap(age_output_ptr, gender_output_ptr); - } - if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") { - slog::err << "In Age Gender network, age layer (" - << age_output_ptr->getCreatorLayer().lock()->name - << ") should be a Convolution, but was: " - << age_output_ptr->getCreatorLayer().lock()->type << slog::endl; - return false; - } - if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") { - slog::err <<"In Age Gender network, gender layer (" - << gender_output_ptr->getCreatorLayer().lock()->name - << ") should be a SoftMax, but was: " - << gender_output_ptr->getCreatorLayer().lock()->type - << slog::endl; - return false; - } - slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name << slog::endl; - slog::info << "Gender layer: " << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; -#endif - - age_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - age_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - gender_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - gender_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - - //output_age_ = age_output_ptr->name; - addOutputInfo("age", age_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; - addOutputInfo("gender", gender_output_ptr->getName()); - - printAttribute(); - return true; -} - -const std::string Models::AgeGenderDetectionModel::getModelCategory() const -{ - return "Age Gender Detection"; -} diff --git a/dynamic_vino_lib/src/models/base_model.cpp 
b/dynamic_vino_lib/src/models/base_model.cpp deleted file mode 100644 index f9ddeaa6..00000000 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of BaseModel class - * @file base_model.cpp - */ - -#include -#include -#include -#include -#include -#include -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" - -// Validated Base Network -Models::BaseModel::BaseModel( - const std::string& label_loc, const std::string& model_loc, int max_batch_size) -: label_loc_(label_loc), - model_loc_(model_loc), - max_batch_size_(max_batch_size), - ModelAttribute(model_loc) -{ - if (model_loc.empty()) { - throw std::logic_error("model file name is empty!"); - } - - ///net_reader_ = std::make_shared(); -} - -void Models::BaseModel::modelInit() -{ - slog::info << "Loading network files" << model_loc_ << slog::endl; - slog::info << label_loc_ << slog::endl; - // Read network model - ///net_reader_->ReadNetwork(model_loc_); - net_reader_ = engine.ReadNetwork(model_loc_); - // Extract model name and load it's weights - // remove extension - size_t last_index = model_loc_.find_last_of("."); - std::string raw_name = model_loc_.substr(0, last_index); - ///std::string bin_file_name = raw_name + ".bin"; - ///net_reader_->ReadWeights(bin_file_name); - // Read labels (if any) - std::string label_file_name = label_loc_.substr(0, last_index); - //std::string label_file_name = raw_name + ".labels"; - loadLabelsFromFile(label_loc_); - - // Set batch size to given max_batch_size_ - slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; - ///net_reader_->getNetwork().setBatchSize(max_batch_size_); - net_reader_.setBatchSize(max_batch_size_); - - updateLayerProperty(net_reader_); -} - -#if 0 -bool Models::BaseModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) -{ -#if 0 - if (!updateLayerProperty(net_reader)){ - slog::warn << "The model(name: " << getModelName() << ") failed to update Layer Property!" - << slog::endl; - return false; - } -#endif - if(!isVerified()){ - slog::warn << "The model(name: " << getModelName() << ") does NOT pass Attribute Check!" 
- << slog::endl; - return false; - } - - return true; -} -#endif - -Models::ObjectDetectionModel::ObjectDetectionModel( - const std::string& label_loc, - const std::string& model_loc, - int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) {} diff --git a/dynamic_vino_lib/src/models/emotion_detection_model.cpp b/dynamic_vino_lib/src/models/emotion_detection_model.cpp deleted file mode 100644 index 0c4f78e0..00000000 --- a/dynamic_vino_lib/src/models/emotion_detection_model.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of EmotionDetectionModel class - * @file emotion_detection_model.cpp - */ -#include - -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// Validated Emotions Detection Network -Models::EmotionDetectionModel::EmotionDetectionModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::EmotionDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; - return false; - } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); - - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { - // throw std::logic_error("Age/Gender Recognition network should have two output layers"); - slog::warn << "This model should have and only have 1 output, but we got " - << std::to_string(output_info_map.size()) << "outputs" << slog::endl; - return false; - } - ///InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - ///slog::info << "Emotions layer: " << output_data_ptr->getCreatorLayer().lock()->name << - /// slog::endl; - ///output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - ///output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); - addOutputInfo("output", output_info_map.begin()->first); - - printAttribute(); - return true; ///verifyOutputLayer(output_data_ptr); -} - -bool Models::EmotionDetectionModel::verifyOutputLayer(const InferenceEngine::DataPtr & ptr) -{ -/// if (ptr->getCreatorLayer().lock()->type != "SoftMax") { -/// slog::err <<"In Emotion network, gender layer (" -/// << ptr->getCreatorLayer().lock()->name -/// << ") should be a SoftMax, but was: 
" -/// << ptr->getCreatorLayer().lock()->type -/// << slog::endl; -/// return false; -/// } - - return true; -} - -const std::string Models::EmotionDetectionModel::getModelCategory() const -{ - return "Emotions Detection"; -} diff --git a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp deleted file mode 100644 index faaa6dcf..00000000 --- a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of HeadPoseDetectionModel class - * @file head_pose_detection_model.cpp - */ - -#include -#include - -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// Validated Head Pose Network -Models::HeadPoseDetectionModel::HeadPoseDetectionModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::HeadPoseDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model should have only one input, but we got" - << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; - return false; - } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); - - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - for (auto & output : output_info_map) { - output.second->setPrecision(InferenceEngine::Precision::FP32); - output.second->setLayout(InferenceEngine::Layout::NC); - } - - for (const std::string& outName : {output_angle_r_, output_angle_p_, output_angle_y_}) { - if (output_info_map.find(outName) == output_info_map.end()) { - throw std::logic_error("There is no " + outName + " output in Head Pose Estimation network"); - } else { - addOutputInfo(outName, outName); - } - } - - printAttribute(); - return true; -} - -const std::string Models::HeadPoseDetectionModel::getModelCategory() const -{ - return "Head Pose Network"; -} diff --git a/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp b/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp deleted file mode 100644 index df1388ab..00000000 --- a/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of ObjectDetectionModel class - * @file object_detection_yolov2_model.cpp - */ - -#include "dynamic_vino_lib/models/object_detection_yolov2_model.hpp" -#include -#include -#include -#include -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/object_detection.hpp" - -// Validated Object Detection Network -Models::ObjectDetectionYolov2Model::ObjectDetectionYolov2Model( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::ObjectDetectionYolov2Model::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Yolo-like, which has only one input, but we got " - << std::to_string(input_info_map.size()) << "inputs" << slog::endl; - return false; - } - - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - input_info_ = input_info; - addInputInfo("input", input_info_map.begin()->first); - - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { - slog::warn << "This model seems not Yolo-like! We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." - << slog::endl; - return false; - } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first - << slog::endl; - -#if(0) /// - const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); - // output layer should have attribute called num_classes - slog::info << "Checking Object Detection num_classes" << slog::endl; - if (output_layer == nullptr || - output_layer->params.find("classes") == output_layer->params.end()) { - slog::warn << "This model's output layer (" << output_info_map.begin()->first - << ") should have num_classes integer attribute" << slog::endl; - return false; - } - // class number should be equal to size of label vector - // if network has default "background" class, fake is used - const int num_classes = output_layer->GetParamAsInt("classes"); - slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; - if (getLabels().size() != num_classes) { - if (getLabels().size() == (num_classes - 1)) { - getLabels().insert(getLabels().begin(), "fake"); - } else { - getLabels().clear(); - } - } -#endif - - // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); - setMaxProposalCount(static_cast(output_dims[2])); - slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - - auto object_size = static_cast(output_dims[3]); - if (object_size != 33) { - slog::warn << "This model is NOT Yolo-like, whose output data for each detected object" - << "should have 7 dimensions, but was " << std::to_string(object_size) - << slog::endl; - return false; - } - setObjectSize(object_size); - - if (output_dims.size() != 2) { - slog::warn << "This model is not Yolo-like, output dimensions shoulld be 2, but was" - << std::to_string(output_dims.size()) << slog::endl; - return false; - } - - printAttribute(); - slog::info << "This model is Yolo-like, Layer Property updated!" << slog::endl; - return true; -} - -const std::string Models::ObjectDetectionYolov2Model::getModelCategory() const -{ - return "Object Detection Yolo v2"; -} - -bool Models::ObjectDetectionYolov2Model::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) -{ - setFrameSize(frame.cols, frame.rows); - - if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) { - return false; - } - return true; -} - -bool Models::ObjectDetectionYolov2Model::matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) -{ - if (engine == nullptr) { - slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; - return false; - } - - std::string input_name = getInputName(); - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); - - InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - float * blob_data = input_blob->buffer().as(); - - - int dx = 0; - int dy = 0; - int srcw = 0; - int srch = 0; - - int IH = height; - int IW = width; - - cv::Mat image = orig_image.clone(); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - - image.convertTo(image, CV_32F, 1.0 / 255.0, 0); - srcw = image.size().width; - srch = image.size().height; - - cv::Mat resizedImg(IH, IW, CV_32FC3); - resizedImg = cv::Scalar(0.5, 0.5, 0.5); - int imw = image.size().width; - int imh = image.size().height; - float resize_ratio = static_cast(IH) / static_cast(std::max(imw, imh)); - cv::resize(image, image, cv::Size(imw * resize_ratio, imh * resize_ratio)); - - int new_w = imw; - int new_h = imh; - if ((static_cast(IW) / imw) < (static_cast(IH) / imh)) { - new_w = IW; - new_h = (imh * IW) / imw; - } else { - new_h = IH; - new_w = (imw * IW) / imh; - } - dx = (IW - new_w) / 2; - dy = (IH - new_h) / 2; - - imh = image.size().height; - imw = image.size().width; - - for (int row = 0; row < imh; row++) { - for (int col = 0; col < imw; col++) { - for (int ch = 0; ch < 3; ch++) { - resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; - } - } - } - - for (int c = 0; c < channels; c++) { - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { - blob_data[c * width * height + h * width + w] = resizedImg.at(h, w)[c]; - } - } - } - - setFrameSize(srcw, srch); - return true; -} - -bool Models::ObjectDetectionYolov2Model::fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh, - const bool & enable_roi_constraint) -{ - try { - if (engine == nullptr) { - slog::err << "Trying to fetch results from Engines." << slog::endl; - return false; - } - - InferenceEngine::InferRequest::Ptr request = engine->getRequest(); - - std::string output = getOutputName(); - std::vector & labels = getLabels(); - const float * detections = - request->GetBlob(output)->buffer().as::value_type *>(); - ///InferenceEngine::CNNLayerPtr layer = - /// getNetReader()->getNetwork().getLayerByName(output.c_str()); - int input_height = input_info_->getTensorDesc().getDims()[2]; - int input_width = input_info_->getTensorDesc().getDims()[3]; - - // --------------------------- Validating output parameters -------------------------------- - ///if (layer != nullptr && layer->type != "RegionYolo") { - /// throw std::runtime_error("Invalid output type: " + layer->type + ". 
RegionYolo expected"); - ///} - // --------------------------- Extracting layer parameters -------------------------------- - const int num = 3; ///layer->GetParamAsInt("num"); - const int coords = 9; ///layer->GetParamAsInt("coords"); - const int classes = 21; ///layer->GetParamAsInt("classes"); - auto blob = request->GetBlob(output); - const int out_blob_h = static_cast(blob->getTensorDesc().getDims()[2]);; - - std::vector anchors = { - 0.572730, 0.677385, - 1.874460, 2.062530, - 3.338430, 5.474340, - 7.882820, 3.527780, - 9.770520, 9.168280 - }; - auto side = out_blob_h; - - auto side_square = side * side; - // --------------------------- Parsing YOLO Region output ------------------------------------- - std::vector raw_results; - for (int i = 0; i < side_square; ++i) { - int row = i / side; - int col = i % side; - - for (int n = 0; n < num; ++n) { - int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); - int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); - - float scale = detections[obj_index]; - - if (scale < confidence_thresh) { - continue; - } - - float x = (col + detections[box_index + 0 * side_square]) / side * input_width; - float y = (row + detections[box_index + 1 * side_square]) / side * input_height; - float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / - side * input_height; - float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * - input_width; - - for (int j = 0; j < classes; ++j) { - int class_index = - getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); - - float prob = scale * detections[class_index]; - if (prob < confidence_thresh) { - continue; - } - - float x_min = x - width / 2; - float y_min = y - height / 2; - - auto frame_size = getFrameSize(); - float x_min_resized = x_min / input_width * frame_size.width; - float y_min_resized = y_min / input_height * frame_size.height; - float width_resized = width / input_width * frame_size.width; - float height_resized = height / input_height * frame_size.height; - - cv::Rect r(x_min_resized, y_min_resized, width_resized, height_resized); - Result result(r); - // result.label_ = j; - std::string label = j < - labels.size() ? labels[j] : std::string("label #") + std::to_string(j); - result.setLabel(label); - - result.setConfidence(prob); - raw_results.emplace_back(result); - } - } - } - - std::sort(raw_results.begin(), raw_results.end()); - for (unsigned int i = 0; i < raw_results.size(); ++i) { - if (raw_results[i].getConfidence() == 0) { - continue; - } - for (unsigned int j = i + 1; j < raw_results.size(); ++j) { - auto iou = dynamic_vino_lib::ObjectDetection::calcIoU( - raw_results[i].getLocation(), raw_results[j].getLocation()); - if (iou >= 0.45) { - raw_results[j].setConfidence(0); - } - } - } - - for (auto & raw_result : raw_results) { - if (raw_result.getConfidence() < confidence_thresh) { - continue; - } - - results.push_back(raw_result); - } - - raw_results.clear(); - - return true; - } catch (const std::exception & error) { - slog::err << error.what() << slog::endl; - return false; - } catch (...) { - slog::err << "Unknown/internal exception happened." 
<< slog::endl; - return false; - } -} - -int Models::ObjectDetectionYolov2Model::getEntryIndex( - int side, int lcoords, int lclasses, - int location, int entry) -{ - int n = location / (side * side); - int loc = location % (side * side); - return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; -} diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp deleted file mode 100644 index ab4797f0..00000000 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of ObjectSegmentationModel class - * @file object_segmentation_model.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -// Validated Object Segmentation Network -Models::ObjectSegmentationModel::ObjectSegmentationModel( - const std::string & label_loc, - const std::string & model_loc, - int max_batch_size) - : BaseModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::ObjectSegmentationModel::enqueue( - const std::shared_ptr &engine, - const cv::Mat &frame, - const cv::Rect &input_frame_loc) -{ - if (engine == nullptr) - { - slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; - return false; - } - - for (const auto &inputInfoItem : input_info_) - { - // Fill first input tensor with images. First b channel, then g and r channels - slog::debug<<"first tensor"<getTensorDesc().getDims().size()<getTensorDesc().getDims().size()==4) - { - matToBlob(frame, input_frame_loc, 1.0, 0, engine); - } - - // Fill second input tensor with image info - if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) - { - InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); - auto data = input->buffer().as::value_type *>(); - data[0] = static_cast(frame.rows); // height - data[1] = static_cast(frame.cols); // width - data[2] = 1; - } - } - return true; - -} - -bool Models::ObjectSegmentationModel::matToBlob( - const cv::Mat &orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr &engine) -{ - (void)scale_factor; - (void)batch_index; - - if (engine == nullptr) - { - slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; - return false; - } - - size_t channels = orig_image.channels(); - size_t height = orig_image.size().height; - size_t width = orig_image.size().width; - - size_t strideH = orig_image.step.buf[0]; - size_t strideW = orig_image.step.buf[1]; - - bool is_dense = - strideW == channels && - strideH == channels * width; - - if (!is_dense){ - slog::err << "Doesn't support conversion from not dense cv::Mat." 
<< slog::endl; - return false; - } - - InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, - {1, channels, height, width}, - InferenceEngine::Layout::NHWC); - - auto shared_blob = InferenceEngine::make_shared_blob(tDesc, orig_image.data); - engine->getRequest()->SetBlob(getInputName(), shared_blob); - - return true; -} - -const std::string Models::ObjectSegmentationModel::getModelCategory() const -{ - return "Object Segmentation"; -} - -bool Models::ObjectSegmentationModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info<< "Checking INPUTS for Model" <second; - slog::debug<<"channel size"<second; - inputInfo.getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - inputInfo.setLayout(InferenceEngine::Layout::NHWC); - inputInfo.setPrecision(InferenceEngine::Precision::U8); - - //InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - //addInputInfo("input", input_info_map.begin()->first.c_str()); - addInputInfo("input", inputShapes.begin()->first); - - InferenceEngine::OutputsDataMap outputsDataMap = network.getOutputsInfo(); - if (outputsDataMap.size() != 1) { - //throw std::runtime_error("Demo supports topologies only with 1 output"); - slog::warn << "This inference sample should have only one output, but we got" - << std::to_string(outputsDataMap.size()) << "outputs" - << slog::endl; - return false; - } - - InferenceEngine::Data & data = *outputsDataMap.begin()->second; - data.setPrecision(InferenceEngine::Precision::FP32); - - const InferenceEngine::SizeVector& outSizeVector = data.getTensorDesc().getDims(); - int outChannels, outHeight, outWidth; - slog::debug << "output size vector " << outSizeVector.size() << slog::endl; - switch(outSizeVector.size()){ - case 3: - outChannels = 0; - outHeight = outSizeVector[1]; - outWidth = outSizeVector[2]; - break; - case 4: - outChannels = outSizeVector[1]; - outHeight = outSizeVector[2]; - outWidth = outSizeVector[3]; - break; - default: - throw std::runtime_error("Unexpected output blob shape. Only 4D and 3D output blobs are" - "supported."); - - } - if(outHeight == 0 || outWidth == 0){ - slog::err << "output_height or output_width is not set, please check the MaskOutput Info " - << "is set correctly." << slog::endl; - //throw std::runtime_error("output_height or output_width is not set, please check the MaskOutputInfo"); - return false; - } - - slog::debug << "output width " << outWidth<< slog::endl; - slog::debug << "output hEIGHT " << outHeight<< slog::endl; - slog::debug << "output CHANNALS " << outChannels<< slog::endl; - addOutputInfo("masks", (outputsDataMap.begin()++)->first); - addOutputInfo("detection", outputsDataMap.begin()->first); - - //const InferenceEngine::CNNLayerPtr output_layer = - //network.getLayerByName(outputsDataMap.begin()->first.c_str()); - ///const InferenceEngine::CNNLayerPtr output_layer = - /// network.getLayerByName(getOutputName("detection").c_str()); - //const int num_classes = output_layer->GetParamAsInt("num_classes"); - //slog::info << "Checking Object Segmentation output ... 
num_classes=" << num_classes << slog::endl; - -#if 0 - if (getLabels().size() != num_classes) - { - if (getLabels().size() == (num_classes - 1)) - { - getLabels().insert(getLabels().begin(), "fake"); - } - else - { - getLabels().clear(); - } - } -#endif -/* - const InferenceEngine::SizeVector output_dims = data.getTensorDesc().getDims(); - setMaxProposalCount(static_cast(output_dims[2])); - slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - auto object_size = static_cast(output_dims[3]); - setObjectSize(object_size); - - slog::debug << "model size" << output_dims.size() << slog::endl;*/ - printAttribute(); - slog::info << "This model is SSDNet-like, Layer Property updated!" << slog::endl; - return true; - -} diff --git a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp deleted file mode 100644 index c12e4071..00000000 --- a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of PersonAttribsDetectionModel class - * @file person_attribs_detection_model.cpp - */ -#include -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -// Validated Person Attributes Detection Network -Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) {} - -bool Models::PersonAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - throw std::logic_error("Person Attribs topology should have only one input"); - } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); - - slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); - if (output_info_map.size() != 3) { - throw std::logic_error("Person Attribs Network expects networks having 3 output"); - } - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; - - auto output_iter = output_info_map.begin(); - InferenceEngine::DataPtr attribute_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr top_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr bottom_output_ptr = (output_iter++)->second; - - addOutputInfo("attributes_output_", attribute_output_ptr->getName()); - //output_gender_ = 
gender_output_ptr->name; - addOutputInfo("top_output_", top_output_ptr->getName()); - addOutputInfo("bottom_output_", bottom_output_ptr->getName()); - printAttribute(); - return true; -} - -const std::string Models::PersonAttribsDetectionModel::getModelCategory() const -{ - return "Person Attributes Detection"; -} diff --git a/dynamic_vino_lib/src/models/person_reidentification_model.cpp b/dynamic_vino_lib/src/models/person_reidentification_model.cpp deleted file mode 100644 index e9e2834c..00000000 --- a/dynamic_vino_lib/src/models/person_reidentification_model.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of PersonReidentificationModel class - * @file person_reidentification_model.cpp - */ -#include -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -// Validated Person Reidentification Network -Models::PersonReidentificationModel::PersonReidentificationModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) {} -/* -void Models::PersonReidentificationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) -{ - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; -} - -void Models::PersonReidentificationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) {} - -const std::string Models::PersonReidentificationModel::getModelCategory() const -{ - return "Person Reidentification"; -} -*/ -bool Models::PersonReidentificationModel::updateLayerProperty( - InferenceEngine::CNNNetwork& netreader) -{ - slog::info << "Checking Inputs for Model" << getModelName() << slog::endl; - - auto network = netreader; - - InferenceEngine::InputsDataMap input_info_map(network.getInputsInfo()); - - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - network.getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; - - return true; -} - -const std::string Models::PersonReidentificationModel::getModelCategory() const -{ - return "Person Reidentification"; -} diff --git 
a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp deleted file mode 100644 index a9778ccf..00000000 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of RvizOutput class - * @file rviz_output.cpp - */ - -#include -#include -#include -#include -#include "cv_bridge/cv_bridge.h" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/outputs/rviz_output.hpp" - -Outputs::RvizOutput::RvizOutput(std::string output_name, const rclcpp::Node::SharedPtr node) -: BaseOutput(output_name) -{ - if(node != nullptr){ - node_ = node; - } else { - node_ = rclcpp::Node::make_shared(output_name + "_image_publisher"); - } - image_topic_ = nullptr; - pub_image_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/images", 16); - image_window_output_ = std::make_shared(output_name_, 950); -} - -void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) -{ - image_window_output_->feedFrame(frame); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept(const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept( - const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept(const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept(const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::accept(const std::vector & results) -{ - image_window_output_->accept(results); -} - -void Outputs::RvizOutput::handleOutput() -{ - image_window_output_->setPipeline(getPipeline()); - image_window_output_->decorateFrame(); - cv::Mat frame = image_window_output_->getFrame(); - std_msgs::msg::Header header = - getPipeline()->getInputDevice()->getLockedHeader(); - std::shared_ptr cv_ptr = - std::make_shared(header, "bgr8", frame); - image_topic_ = cv_ptr->toImageMsg(); - pub_image_->publish(*image_topic_); -} diff --git a/pipeline_srv_msgs/CMakeLists.txt b/openvino_msgs/CMakeLists.txt similarity index 95% rename from pipeline_srv_msgs/CMakeLists.txt rename to openvino_msgs/CMakeLists.txt index b05b70bf..30b70b83 100644 --- a/pipeline_srv_msgs/CMakeLists.txt +++ b/openvino_msgs/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel 
Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.5) -project(pipeline_srv_msgs) +project(openvino_msgs) if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 14) diff --git a/pipeline_srv_msgs/msg/Connection.msg b/openvino_msgs/msg/Connection.msg similarity index 93% rename from pipeline_srv_msgs/msg/Connection.msg rename to openvino_msgs/msg/Connection.msg index 80f63a18..05fecda8 100644 --- a/pipeline_srv_msgs/msg/Connection.msg +++ b/openvino_msgs/msg/Connection.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pipeline_srv_msgs/msg/Pipeline.msg b/openvino_msgs/msg/Pipeline.msg similarity index 94% rename from pipeline_srv_msgs/msg/Pipeline.msg rename to openvino_msgs/msg/Pipeline.msg index f2b7bab1..d4272961 100644 --- a/pipeline_srv_msgs/msg/Pipeline.msg +++ b/openvino_msgs/msg/Pipeline.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pipeline_srv_msgs/msg/PipelineRequest.msg b/openvino_msgs/msg/PipelineRequest.msg similarity index 94% rename from pipeline_srv_msgs/msg/PipelineRequest.msg rename to openvino_msgs/msg/PipelineRequest.msg index 565ea4fc..0fc9e053 100644 --- a/pipeline_srv_msgs/msg/PipelineRequest.msg +++ b/openvino_msgs/msg/PipelineRequest.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pipeline_srv_msgs/package.xml b/openvino_msgs/package.xml similarity index 96% rename from pipeline_srv_msgs/package.xml rename to openvino_msgs/package.xml index b78f2cfe..85d579d0 100644 --- a/pipeline_srv_msgs/package.xml +++ b/openvino_msgs/package.xml @@ -1,7 +1,7 @@ - pipeline_srv_msgs + openvino_msgs 0.9.0 A package containing pipeline service message definitions. Yang Lu diff --git a/pipeline_srv_msgs/srv/PipelineSrv.srv b/openvino_msgs/srv/PipelineSrv.srv similarity index 93% rename from pipeline_srv_msgs/srv/PipelineSrv.srv rename to openvino_msgs/srv/PipelineSrv.srv index fb23dec5..b72935b6 100644 --- a/pipeline_srv_msgs/srv/PipelineSrv.srv +++ b/openvino_msgs/srv/PipelineSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/vino_param_lib/CMakeLists.txt b/openvino_param_lib/CMakeLists.txt similarity index 94% rename from vino_param_lib/CMakeLists.txt rename to openvino_param_lib/CMakeLists.txt index c3cddd8f..8a78469a 100644 --- a/vino_param_lib/CMakeLists.txt +++ b/openvino_param_lib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. 
cmake_minimum_required(VERSION 3.5) -project(vino_param_lib) +project(openvino_param_lib) find_package(ament_cmake REQUIRED) find_package(yaml_cpp_vendor REQUIRED) @@ -67,8 +67,8 @@ if(UNIX OR APPLE) # Generic flags. set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") - # Dot not forward c++14 flag to GPU beucause it is not supported + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") + # Do not forward the c++17 flag to GPU because it is not supported set(CUDA_PROPAGATE_HOST_FLAGS OFF) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") diff --git a/vino_param_lib/include/vino_param_lib/param_manager.hpp b/openvino_param_lib/include/openvino_param_lib/param_manager.hpp similarity index 89% rename from vino_param_lib/include/vino_param_lib/param_manager.hpp rename to openvino_param_lib/include/openvino_param_lib/param_manager.hpp index 8897ba84..75a4b133 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.hpp +++ b/openvino_param_lib/include/openvino_param_lib/param_manager.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ * @brief A header file with declaration for parameter management * @file param_manager.hpp */ -#ifndef VINO_PARAM_LIB__PARAM_MANAGER_HPP_ -#define VINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#ifndef OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#define OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ #include #include @@ -46,7 +46,7 @@ class ParamManager // singleton * The instance will be created on the first call. * @return The reference of the ParamManager class. */ - static ParamManager & getInstance() + static ParamManager& getInstance() { static ParamManager manager_; return manager_; @@ -65,9 +65,10 @@ class ParamManager // singleton std::string model; std::string model_type; std::string label; - int batch; + int batch = 1; float confidence_threshold = 0.5; bool enable_roi_constraint = false; + float nms_threshold = 0.45; }; struct FilterRawData @@ -125,7 +126,7 @@ class ParamManager // singleton * @param[in] name: the name of the pipeline to be retrieved. * @return The pipeline parameters, or throw a logic error. */ - PipelineRawData getPipeline(const std::string & name) const; + PipelineRawData getPipeline(const std::string& name) const; /** * @brief Retrieve common parameters. @@ -140,12 +141,12 @@ class ParamManager // singleton ParamManager() { } - ParamManager(ParamManager const &); - void operator=(ParamManager const &); + ParamManager(ParamManager const&); + void operator=(ParamManager const&); std::vector pipelines_; CommonRawData common_; }; } // namespace Params -#endif // VINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#endif // OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ diff --git a/openvino_param_lib/include/openvino_param_lib/slog.hpp b/openvino_param_lib/include/openvino_param_lib/slog.hpp new file mode 100644 index 00000000..d1fb8db4 --- /dev/null +++ b/openvino_param_lib/include/openvino_param_lib/slog.hpp @@ -0,0 +1,177 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with logging facility for common samples + * @file slog.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__SLOG_HPP_ +#define OPENVINO_WRAPPER_LIB__SLOG_HPP_ + +#pragma once + +#include +#include + +namespace slog +{ +#if 1 +enum COLOR +{ + RESET = 0, + BLUE = 1, + GREEN = 2, + YELLOW = 3, + RED = 4, +}; + +#else +// the following are UBUNTU/LINUX ONLY terminal color codes. +#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ +#endif + +/** + * @class LogStreamEndLine + * @brief The LogStreamEndLine class implements an end line marker for a log + * stream + */ +class LogStreamEndLine +{ +}; + +static constexpr LogStreamEndLine endl; + +/** + * @class LogStream + * @brief The LogStream class implements a stream for sample logging + */ +class LogStream +{ + std::string _prefix; + std::ostream* _log_stream; + bool _new_line; + int _color_id; + +public: + /** + * @brief A constructor. 
Creates a LogStream object + * @param prefix The prefix to print + */ + LogStream(const std::string& prefix, std::ostream& log_stream, const int color_id = -1) + : _prefix(prefix), _new_line(true), _color_id(color_id) + { + _log_stream = &log_stream; + } + + /** + * @brief A stream output operator to be used within the logger + * @param arg Object for serialization in the logger message + */ + template <typename T> + LogStream& operator<<(const T& arg) + { + if (_new_line) { + setLineColor(); + (*_log_stream) << "[ " << _prefix << " ] "; + _new_line = false; + } + + (*_log_stream) << arg; + return *this; + } + + // Specializing for LogStreamEndLine to support slog::endl + LogStream& operator<<(const LogStreamEndLine& arg) + { + _new_line = true; + resetLineColor(); + (*_log_stream) << std::endl; + return *this; + } + + void setLineColor() + { + switch (_color_id) { + case BLUE: + (*_log_stream) << "\033[34m"; + break; + case GREEN: + (*_log_stream) << "\033[32m"; + break; + case YELLOW: + (*_log_stream) << "\033[33m"; + break; + case RED: + (*_log_stream) << "\033[31m"; + break; + default: + break; + } + } + + void resetLineColor() + { + if (_color_id > 0) { + (*_log_stream) << "\033[0m"; // RESET + } + } +}; + +class NullStream +{ +public: + NullStream() + { + } + + NullStream(const std::string& prefix, std::ostream& log_stream) + { + (void)prefix; + (void)log_stream; + } + + template <typename T> + NullStream& operator<<(const T& arg) + { + return *this; + } +}; + +#ifdef LOG_LEVEL_DEBUG +static LogStream debug("DEBUG", std::cout, GREEN); +#else +static NullStream debug; +#endif +static LogStream info("INFO", std::cout, BLUE); +static LogStream warn("WARNING", std::cout, YELLOW); +static LogStream err("ERROR", std::cerr, RED); + +} // namespace slog +#endif // OPENVINO_WRAPPER_LIB__SLOG_HPP_ diff --git a/vino_param_lib/package.xml b/openvino_param_lib/package.xml similarity index 94% rename from vino_param_lib/package.xml rename to openvino_param_lib/package.xml index 982e42fa..6484dd58 100644 --- a/vino_param_lib/package.xml +++ b/openvino_param_lib/package.xml @@ -1,7 +1,7 @@ - vino_param_lib + openvino_param_lib 0.9.0 Library for ROS2 OpenVINO parameter management Weizhi Liu diff --git a/vino_param_lib/param/pipeline.yaml b/openvino_param_lib/param/pipeline.yaml similarity index 100% rename from vino_param_lib/param/pipeline.yaml rename to openvino_param_lib/param/pipeline.yaml diff --git a/vino_param_lib/src/param_manager.cpp b/openvino_param_lib/src/param_manager.cpp similarity index 64% rename from vino_param_lib/src/param_manager.cpp rename to openvino_param_lib/src/param_manager.cpp index dbc167da..6ad47587 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/openvino_param_lib/src/param_manager.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License.
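For reviewers' orientation, the streams declared in the new slog.hpp above are used like this (a minimal sketch; the message strings are illustrative only):

    #include "openvino_param_lib/slog.hpp"

    int main()
    {
      slog::info << "Pipeline ready" << slog::endl;       // "[ INFO ] Pipeline ready", blue prefix
      slog::warn << "Falling back to CPU" << slog::endl;  // yellow [ WARNING ] prefix
      slog::err << "Model file missing" << slog::endl;    // red [ ERROR ] prefix on std::cerr
      slog::debug << "Verbose detail" << slog::endl;      // dropped by NullStream unless LOG_LEVEL_DEBUG is defined
      return 0;
    }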
-#include "vino_param_lib/param_manager.hpp" -#include +#include "openvino_param_lib/param_manager.hpp" +#include #include #include #include @@ -23,33 +23,28 @@ namespace Params { -void operator>>(const YAML::Node & node, ParamManager::PipelineRawData & pipeline); -void operator>>(const YAML::Node & node, std::vector & list); -void operator>>(const YAML::Node & node, ParamManager::InferenceRawData & infer); -void operator>>(const YAML::Node & node, std::vector & list); -void operator>>(const YAML::Node & node, std::multimap & connect); -void operator>>(const YAML::Node & node, std::vector & filters); -void operator>>(const YAML::Node & node, std::string & str); -void operator>>(const YAML::Node & node, bool & val); -void operator>>(const YAML::Node & node, int & val); -void operator>>(const YAML::Node & node, float & val); -void operator>>(const YAML::Node & node, ParamManager::CommonRawData & common); - -#define YAML_PARSE(node, key, val) \ - try \ - { \ - node[key] >> val; \ - } \ - catch (const YAML::Exception & e) \ - { \ - slog::warn << e.msg << slog::endl; \ - } \ - catch (...) \ - { \ - slog::warn << "Exception occurs when parsing string." << slog::endl; \ +void operator>>(const YAML::Node& node, ParamManager::PipelineRawData& pipeline); +void operator>>(const YAML::Node& node, std::vector& list); +void operator>>(const YAML::Node& node, ParamManager::InferenceRawData& infer); +void operator>>(const YAML::Node& node, std::vector& list); +void operator>>(const YAML::Node& node, std::multimap& connect); +void operator>>(const YAML::Node& node, std::vector& filters); +void operator>>(const YAML::Node& node, std::string& str); +void operator>>(const YAML::Node& node, bool& val); +void operator>>(const YAML::Node& node, int& val); +void operator>>(const YAML::Node& node, float& val); +void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common); + +#define YAML_PARSE(node, key, val) \ + try { \ + node[key] >> val; \ + } catch (const YAML::Exception& e) { \ + slog::warn << e.msg << slog::endl; \ + } catch (...) { \ + slog::warn << "Exception occurs when parsing string." 
<< slog::endl; \ } -void operator>>(const YAML::Node & node, std::vector & list) +void operator>>(const YAML::Node& node, std::vector& list) { slog::info << "Pipeline size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { @@ -59,7 +54,7 @@ void operator>>(const YAML::Node & node, std::vector>(const YAML::Node & node, ParamManager::CommonRawData & common) +void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common) { YAML_PARSE(node, "camera_topic", common.camera_topic) YAML_PARSE(node, "custom_cpu_library", common.custom_cpu_library) @@ -67,7 +62,7 @@ void operator>>(const YAML::Node & node, ParamManager::CommonRawData & common) YAML_PARSE(node, "enable_performance_count", common.enable_performance_count) } -void operator>>(const YAML::Node & node, ParamManager::PipelineRawData & pipeline) +void operator>>(const YAML::Node& node, ParamManager::PipelineRawData& pipeline) { YAML_PARSE(node, "name", pipeline.name) YAML_PARSE(node, "inputs", pipeline.inputs) @@ -79,7 +74,7 @@ void operator>>(const YAML::Node & node, ParamManager::PipelineRawData & pipelin slog::info << "Pipeline Params:name=" << pipeline.name << slog::endl; } -void operator>>(const YAML::Node & node, std::vector & list) +void operator>>(const YAML::Node& node, std::vector& list) { slog::info << "Inferences size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { @@ -89,7 +84,7 @@ void operator>>(const YAML::Node & node, std::vector>(const YAML::Node & node, ParamManager::InferenceRawData & infer) +void operator>>(const YAML::Node& node, ParamManager::InferenceRawData& infer) { YAML_PARSE(node, "name", infer.name) YAML_PARSE(node, "model", infer.model) @@ -99,13 +94,14 @@ void operator>>(const YAML::Node & node, ParamManager::InferenceRawData & infer) YAML_PARSE(node, "batch", infer.batch) YAML_PARSE(node, "confidence_threshold", infer.confidence_threshold) YAML_PARSE(node, "enable_roi_constraint", infer.enable_roi_constraint) + YAML_PARSE(node, "nms_threshold", infer.nms_threshold) if (infer.model_type.size() == 0) { infer.model_type = "SSD"; } slog::info << "Inference Params:name=" << infer.name << slog::endl; } -void operator>>(const YAML::Node & node, std::vector & list) +void operator>>(const YAML::Node& node, std::vector& list) { for (unsigned i = 0; i < node.size(); i++) { std::string temp_i; @@ -114,7 +110,7 @@ void operator>>(const YAML::Node & node, std::vector & list) } } -void operator>>(const YAML::Node & node, std::multimap & connect) +void operator>>(const YAML::Node& node, std::multimap& connect) { for (unsigned i = 0; i < node.size(); i++) { std::string left; @@ -127,12 +123,12 @@ void operator>>(const YAML::Node & node, std::multimap } else { rights[i] >> right; } - connect.insert({left, right}); + connect.insert({ left, right }); } } } -void operator>>(const YAML::Node & node, std::vector & filters) +void operator>>(const YAML::Node& node, std::vector& filters) { for (unsigned i = 0; i < node.size(); i++) { std::string left; @@ -150,22 +146,22 @@ void operator>>(const YAML::Node & node, std::vector>(const YAML::Node & node, std::string & str) +void operator>>(const YAML::Node& node, std::string& str) { str = node.as(); } -void operator>>(const YAML::Node & node, bool & val) +void operator>>(const YAML::Node& node, bool& val) { val = node.as(); } -void operator>>(const YAML::Node & node, int & val) +void operator>>(const YAML::Node& node, int& val) { val = node.as(); } -void operator>>(const YAML::Node & node, float & val) +void 
operator>>(const YAML::Node& node, float& val) { val = node.as(); } @@ -173,33 +169,36 @@ void operator>>(const YAML::Node & node, float & val) void ParamManager::print() const { slog::info << "--------parameters DUMP---------------------" << slog::endl; - for (auto & pipeline : pipelines_) { + for (auto& pipeline : pipelines_) { slog::info << "Pipeline: " << pipeline.name << slog::endl; slog::info << "\tInputs: "; - for (auto & i : pipeline.inputs) { + for (auto& i : pipeline.inputs) { slog::info << i.c_str() << ", "; } slog::info << slog::endl; + slog::info << "\tInput_Meta: " << pipeline.input_meta << slog::endl; slog::info << "\tOutputs: "; - for (auto & i : pipeline.outputs) { + for (auto& i : pipeline.outputs) { slog::info << i.c_str() << ", "; } slog::info << slog::endl; slog::info << "\tInferences: " << slog::endl; - for (auto & infer : pipeline.infers) { + for (auto& infer : pipeline.infers) { slog::info << "\t\tName: " << infer.name << slog::endl; slog::info << "\t\tModel: " << infer.model << slog::endl; + slog::info << "\t\tModel-Type: " << infer.model_type << slog::endl; slog::info << "\t\tEngine: " << infer.engine << slog::endl; slog::info << "\t\tLabel: " << infer.label << slog::endl; slog::info << "\t\tBatch: " << infer.batch << slog::endl; slog::info << "\t\tConfidence_threshold: " << infer.confidence_threshold << slog::endl; slog::info << "\t\tEnable_roi_constraint: " << infer.enable_roi_constraint << slog::endl; + slog::info << "\t\tNMS_threshold: " << infer.nms_threshold << slog::endl; } slog::info << "\tConnections: " << slog::endl; - for (auto & c : pipeline.connects) { + for (auto& c : pipeline.connects) { slog::info << "\t\t" << c.first << "->" << c.second << slog::endl; } } @@ -228,16 +227,16 @@ void ParamManager::parse(std::string path) std::vector ParamManager::getPipelineNames() const { std::vector names; - for (auto & p : pipelines_) { + for (auto& p : pipelines_) { names.push_back(p.name); } return names; } -ParamManager::PipelineRawData ParamManager::getPipeline(const std::string & name) const +ParamManager::PipelineRawData ParamManager::getPipeline(const std::string& name) const { - for (auto & p : pipelines_) { + for (auto& p : pipelines_) { if (p.name == name) { return p; } diff --git a/people_msgs/CMakeLists.txt b/openvino_people_msgs/CMakeLists.txt similarity index 96% rename from people_msgs/CMakeLists.txt rename to openvino_people_msgs/CMakeLists.txt index 0500babb..d3257b00 100644 --- a/people_msgs/CMakeLists.txt +++ b/openvino_people_msgs/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
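Stepping back to the param manager for a moment: a hedged sketch of the typical call sequence against the singleton API shown above (the YAML path is a placeholder, not one shipped by this patch):

    #include "openvino_param_lib/param_manager.hpp"

    void loadPipelines()
    {
      auto& manager = Params::ParamManager::getInstance();
      manager.parse("/opt/openvino_toolkit/pipeline.yaml");  // placeholder path
      manager.print();  // dumps pipelines, including the new model_type and nms_threshold fields
      for (const auto& name : manager.getPipelineNames()) {
        Params::ParamManager::PipelineRawData pipeline = manager.getPipeline(name);  // throws a logic error for unknown names
        (void)pipeline;
      }
    }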
@@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.5) -project(people_msgs) +project(openvino_people_msgs) if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 14) diff --git a/openvino_people_msgs/COLCON_IGNORE b/openvino_people_msgs/COLCON_IGNORE new file mode 100644 index 00000000..e69de29b diff --git a/people_msgs/msg/AgeGender.msg b/openvino_people_msgs/msg/AgeGender.msg similarity index 94% rename from people_msgs/msg/AgeGender.msg rename to openvino_people_msgs/msg/AgeGender.msg index ad02ad1a..8436e0e5 100644 --- a/people_msgs/msg/AgeGender.msg +++ b/openvino_people_msgs/msg/AgeGender.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/AgeGenderStamped.msg b/openvino_people_msgs/msg/AgeGenderStamped.msg similarity index 87% rename from people_msgs/msg/AgeGenderStamped.msg rename to openvino_people_msgs/msg/AgeGenderStamped.msg index efa0c724..25217127 100644 --- a/people_msgs/msg/AgeGenderStamped.msg +++ b/openvino_people_msgs/msg/AgeGenderStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/AgeGender[] objects +openvino_people_msgs/AgeGender[] objects diff --git a/people_msgs/msg/Emotion.msg b/openvino_people_msgs/msg/Emotion.msg similarity index 94% rename from people_msgs/msg/Emotion.msg rename to openvino_people_msgs/msg/Emotion.msg index 63f9b83f..af1fccb2 100644 --- a/people_msgs/msg/Emotion.msg +++ b/openvino_people_msgs/msg/Emotion.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/EmotionsStamped.msg b/openvino_people_msgs/msg/EmotionsStamped.msg similarity index 87% rename from people_msgs/msg/EmotionsStamped.msg rename to openvino_people_msgs/msg/EmotionsStamped.msg index 1636fc02..78914367 100644 --- a/people_msgs/msg/EmotionsStamped.msg +++ b/openvino_people_msgs/msg/EmotionsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/Emotion[] emotions +openvino_people_msgs/Emotion[] emotions diff --git a/people_msgs/msg/HeadPose.msg b/openvino_people_msgs/msg/HeadPose.msg similarity index 94% rename from people_msgs/msg/HeadPose.msg rename to openvino_people_msgs/msg/HeadPose.msg index 11a717db..c757d8f7 100644 --- a/people_msgs/msg/HeadPose.msg +++ b/openvino_people_msgs/msg/HeadPose.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/msg/HeadPoseStamped.msg b/openvino_people_msgs/msg/HeadPoseStamped.msg similarity index 87% rename from people_msgs/msg/HeadPoseStamped.msg rename to openvino_people_msgs/msg/HeadPoseStamped.msg index 75d97828..de80904a 100644 --- a/people_msgs/msg/HeadPoseStamped.msg +++ b/openvino_people_msgs/msg/HeadPoseStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/HeadPose[] headposes \ No newline at end of file +openvino_people_msgs/HeadPose[] headposes \ No newline at end of file diff --git a/people_msgs/msg/Landmark.msg b/openvino_people_msgs/msg/Landmark.msg similarity index 94% rename from people_msgs/msg/Landmark.msg rename to openvino_people_msgs/msg/Landmark.msg index 48513b54..1c0e24c4 100644 --- a/people_msgs/msg/Landmark.msg +++ b/openvino_people_msgs/msg/Landmark.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/LandmarkStamped.msg b/openvino_people_msgs/msg/LandmarkStamped.msg similarity index 94% rename from people_msgs/msg/LandmarkStamped.msg rename to openvino_people_msgs/msg/LandmarkStamped.msg index 6da0b1c0..2b390576 100644 --- a/people_msgs/msg/LandmarkStamped.msg +++ b/openvino_people_msgs/msg/LandmarkStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/LicensePlate.msg b/openvino_people_msgs/msg/LicensePlate.msg similarity index 94% rename from people_msgs/msg/LicensePlate.msg rename to openvino_people_msgs/msg/LicensePlate.msg index 6ba97f8c..3f128920 100644 --- a/people_msgs/msg/LicensePlate.msg +++ b/openvino_people_msgs/msg/LicensePlate.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/LicensePlateStamped.msg b/openvino_people_msgs/msg/LicensePlateStamped.msg similarity index 86% rename from people_msgs/msg/LicensePlateStamped.msg rename to openvino_people_msgs/msg/LicensePlateStamped.msg index fa4fbc75..04406dac 100644 --- a/people_msgs/msg/LicensePlateStamped.msg +++ b/openvino_people_msgs/msg/LicensePlateStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. 
std_msgs/Header header -people_msgs/LicensePlate[] licenses \ No newline at end of file +openvino_people_msgs/LicensePlate[] licenses \ No newline at end of file diff --git a/people_msgs/msg/ObjectInMask.msg b/openvino_people_msgs/msg/ObjectInMask.msg similarity index 94% rename from people_msgs/msg/ObjectInMask.msg rename to openvino_people_msgs/msg/ObjectInMask.msg index aa981eef..b3e719f1 100644 --- a/people_msgs/msg/ObjectInMask.msg +++ b/openvino_people_msgs/msg/ObjectInMask.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/ObjectsInMasks.msg b/openvino_people_msgs/msg/ObjectsInMasks.msg similarity index 94% rename from people_msgs/msg/ObjectsInMasks.msg rename to openvino_people_msgs/msg/ObjectsInMasks.msg index 28bd3eb2..f59a9f56 100644 --- a/people_msgs/msg/ObjectsInMasks.msg +++ b/openvino_people_msgs/msg/ObjectsInMasks.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/PersonAttribute.msg b/openvino_people_msgs/msg/PersonAttribute.msg similarity index 94% rename from people_msgs/msg/PersonAttribute.msg rename to openvino_people_msgs/msg/PersonAttribute.msg index 12dd2793..3ad0b0a6 100644 --- a/people_msgs/msg/PersonAttribute.msg +++ b/openvino_people_msgs/msg/PersonAttribute.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/PersonAttributeStamped.msg b/openvino_people_msgs/msg/PersonAttributeStamped.msg similarity index 94% rename from people_msgs/msg/PersonAttributeStamped.msg rename to openvino_people_msgs/msg/PersonAttributeStamped.msg index 0fb6dfcd..4738c51a 100644 --- a/people_msgs/msg/PersonAttributeStamped.msg +++ b/openvino_people_msgs/msg/PersonAttributeStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/PersonsStamped.msg b/openvino_people_msgs/msg/PersonsStamped.msg similarity index 78% rename from people_msgs/msg/PersonsStamped.msg rename to openvino_people_msgs/msg/PersonsStamped.msg index 2aae1a08..f8ee471f 100644 --- a/people_msgs/msg/PersonsStamped.msg +++ b/openvino_people_msgs/msg/PersonsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +14,6 @@ std_msgs/Header header object_msgs/ObjectInBox[] faces -people_msgs/Emotion[] emotions -people_msgs/AgeGender[] agegenders -people_msgs/HeadPose[] headposes +openvino_people_msgs/Emotion[] emotions +openvino_people_msgs/AgeGender[] agegenders +openvino_people_msgs/HeadPose[] headposes diff --git a/people_msgs/msg/Reidentification.msg b/openvino_people_msgs/msg/Reidentification.msg similarity index 94% rename from people_msgs/msg/Reidentification.msg rename to openvino_people_msgs/msg/Reidentification.msg index 34cd1156..3e53b92a 100644 --- a/people_msgs/msg/Reidentification.msg +++ b/openvino_people_msgs/msg/Reidentification.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/ReidentificationStamped.msg b/openvino_people_msgs/msg/ReidentificationStamped.msg similarity index 94% rename from people_msgs/msg/ReidentificationStamped.msg rename to openvino_people_msgs/msg/ReidentificationStamped.msg index cba3c33e..d5ebd9fd 100644 --- a/people_msgs/msg/ReidentificationStamped.msg +++ b/openvino_people_msgs/msg/ReidentificationStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/VehicleAttribs.msg b/openvino_people_msgs/msg/VehicleAttribs.msg similarity index 94% rename from people_msgs/msg/VehicleAttribs.msg rename to openvino_people_msgs/msg/VehicleAttribs.msg index 39b49696..ddc3718f 100644 --- a/people_msgs/msg/VehicleAttribs.msg +++ b/openvino_people_msgs/msg/VehicleAttribs.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/VehicleAttribsStamped.msg b/openvino_people_msgs/msg/VehicleAttribsStamped.msg similarity index 86% rename from people_msgs/msg/VehicleAttribsStamped.msg rename to openvino_people_msgs/msg/VehicleAttribsStamped.msg index 3cdcd47e..4e97859a 100644 --- a/people_msgs/msg/VehicleAttribsStamped.msg +++ b/openvino_people_msgs/msg/VehicleAttribsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/VehicleAttribs[] vehicles +openvino_people_msgs/VehicleAttribs[] vehicles diff --git a/people_msgs/package.xml b/openvino_people_msgs/package.xml similarity index 96% rename from people_msgs/package.xml rename to openvino_people_msgs/package.xml index 2ed702fd..6e59ce10 100644 --- a/people_msgs/package.xml +++ b/openvino_people_msgs/package.xml @@ -1,7 +1,7 @@ - people_msgs + openvino_people_msgs 0.9.0 A package containing people message definitions. 
Weizhi Liu diff --git a/people_msgs/srv/AgeGenderSrv.srv b/openvino_people_msgs/srv/AgeGenderSrv.srv similarity index 93% rename from people_msgs/srv/AgeGenderSrv.srv rename to openvino_people_msgs/srv/AgeGenderSrv.srv index 30469660..1a4f0de1 100644 --- a/people_msgs/srv/AgeGenderSrv.srv +++ b/openvino_people_msgs/srv/AgeGenderSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/srv/EmotionSrv.srv b/openvino_people_msgs/srv/EmotionSrv.srv similarity index 93% rename from people_msgs/srv/EmotionSrv.srv rename to openvino_people_msgs/srv/EmotionSrv.srv index 836fbc38..da8f1c57 100644 --- a/people_msgs/srv/EmotionSrv.srv +++ b/openvino_people_msgs/srv/EmotionSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/srv/HeadPoseSrv.srv b/openvino_people_msgs/srv/HeadPoseSrv.srv similarity index 93% rename from people_msgs/srv/HeadPoseSrv.srv rename to openvino_people_msgs/srv/HeadPoseSrv.srv index 17a1eca4..5fd225d5 100644 --- a/people_msgs/srv/HeadPoseSrv.srv +++ b/openvino_people_msgs/srv/HeadPoseSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/srv/People.srv b/openvino_people_msgs/srv/People.srv similarity index 93% rename from people_msgs/srv/People.srv rename to openvino_people_msgs/srv/People.srv index 100142a0..c349b2e9 100644 --- a/people_msgs/srv/People.srv +++ b/openvino_people_msgs/srv/People.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/dynamic_vino_lib/CMakeLists.txt b/openvino_wrapper_lib/CMakeLists.txt similarity index 85% rename from dynamic_vino_lib/CMakeLists.txt rename to openvino_wrapper_lib/CMakeLists.txt index 09adf376..131607a5 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/openvino_wrapper_lib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2020 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,38 +14,32 @@ cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_lib) +project(openvino_wrapper_lib) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### #################################### -## by default, new InferenceEngine API (InferenceEngine::Core) is used. -## If the deprecated InferenceEngine API (InferenceEngine::InferencePlugin) -## then, uncomment below line -## add_definitions(-DUSE_OLD_E_PLUGIN_API) +## by default, new OpenVINO API (ov::core) is used. 
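As a reference point for this migration, a minimal sketch of the ov::core flow mentioned in the comment above; the model path and device name are placeholders:

    #include <memory>
    #include "openvino/openvino.hpp"

    void runOnce()
    {
      ov::Core core;
      std::shared_ptr<ov::Model> model = core.read_model("model.xml");  // placeholder model
      ov::CompiledModel compiled = core.compile_model(model, "CPU");    // placeholder device
      ov::InferRequest request = compiled.create_infer_request();
      request.infer();  // synchronous inference
    }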
#################################### #################################### ## to get verbose log, ## then, uncomment below line -add_definitions(-DLOG_LEVEL_DEBUG) +#add_definitions(-DLOG_LEVEL_DEBUG) #################################### -# environment variable InferenceEngine_DIR can be use instead of relaive path to specify location of configuration file -#set(InferenceEngine_DIR /opt/intel/computer_vision_sdk_2018.2.299/deployment_tools/inference_engine/share) -#set(OpenCV_DIR /opt/intel/computer_vision_sdk_2018.2.299/opencv/share/OpenCV) - +# environment variable OpenVINO_DIR can be used instead of a relative path to specify the location of the configuration file -message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") -find_package(InferenceEngine REQUIRED) -if(NOT InferenceEngine_FOUND) +find_package(OpenVINO REQUIRED) +if(NOT OpenVINO_FOUND) message(FATAL_ERROR "") endif() +set(OpenVINO_LIBRARIES openvino::runtime) # Find OpenCV library if it exists find_package(OpenCV REQUIRED) @@ -68,20 +62,12 @@ find_package(rmw REQUIRED) find_package(std_msgs REQUIRED) find_package(sensor_msgs REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs REQUIRED) +find_package(openvino_msgs REQUIRED) find_package(class_loader REQUIRED) find_package(cv_bridge REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) -################### -#To be deleted -#set(CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) -#add_library(cpu_extension SHARED IMPORTED) -#set_target_properties(cpu_extension PROPERTIES -# IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) - if("${CMAKE_BUILD_TYPE}" STREQUAL "") message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used") set(CMAKE_BUILD_TYPE "Release") @@ -175,17 +161,15 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-deprecated-de # Properties->C/C++->General->Additional Include Directories include_directories( - # ${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader ${CMAKE_CURRENT_SOURCE_DIR}/include - ${InferenceEngine_INCLUDE_DIRS} - #${realsense2_INCLUDE_DIRS} + ${OpenVINO_INCLUDE_DIRS} ) if(UNIX) set(LIB_DL dl) endif() -set(DEPENDENCIES ${realsense2_LIBRARY} ${OpenCV_LIBS} ${InferenceEngine_LIBRARIES}) +set(DEPENDENCIES ${realsense2_LIBRARY} ${OpenCV_LIBS} openvino::runtime) add_library(${PROJECT_NAME} SHARED src/services/pipeline_processing_server.cpp @@ -204,6 +188,8 @@ add_library(${PROJECT_NAME} SHARED src/inferences/object_detection.cpp src/inferences/head_pose_detection.cpp src/inferences/object_segmentation.cpp + src/inferences/object_segmentation_maskrcnn.cpp + src/inferences/object_segmentation_instance.cpp src/inferences/person_reidentification.cpp src/inferences/person_attribs_detection.cpp #src/inferences/landmarks_detection.cpp @@ -223,6 +209,9 @@ add_library(${PROJECT_NAME} SHARED src/models/face_detection_model.cpp src/models/head_pose_detection_model.cpp src/models/object_segmentation_model.cpp + src/models/object_segmentation_maskrcnn_model.cpp + src/models/object_segmentation_instance_model.cpp + src/models/object_segmentation_instance_maskrcnn_model.cpp src/models/person_reidentification_model.cpp src/models/person_attribs_detection_model.cpp #src/models/landmarks_detection_model.cpp @@ -230,7 +219,8 @@ add_library(${PROJECT_NAME} SHARED src/models/vehicle_attribs_detection_model.cpp src/models/license_plate_detection_model.cpp src/models/object_detection_ssd_model.cpp -
src/models/object_detection_yolov2_model.cpp + src/models/object_detection_yolov5_model.cpp + src/models/object_detection_yolov8_model.cpp src/outputs/image_window_output.cpp src/outputs/ros_topic_output.cpp src/outputs/rviz_output.cpp @@ -246,13 +236,12 @@ ament_target_dependencies(${PROJECT_NAME} "std_msgs" "sensor_msgs" "object_msgs" - "people_msgs" - "pipeline_srv_msgs" + "openvino_msgs" "ament_index_cpp" "class_loader" "realsense2" "cv_bridge" - "vino_param_lib" + "openvino_param_lib" "yaml_cpp_vendor" ) diff --git a/dynamic_vino_lib/Doxyfile b/openvino_wrapper_lib/Doxyfile similarity index 100% rename from dynamic_vino_lib/Doxyfile rename to openvino_wrapper_lib/Doxyfile diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp similarity index 74% rename from dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp index fbf974d4..ab5bec85 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,13 @@ * @brief A header file with declaration for Inference Engine class * @file engine.hpp */ -#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ -#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ +#define OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ #pragma once -#include "dynamic_vino_lib/models/base_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino/openvino.hpp" namespace Engines { @@ -35,7 +35,7 @@ namespace Engines class Engine { public: -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) /** * DEPRECATED! instead of using Engine(InferenceEngine::InferRequest::Ptr &) * @brief Create an NetworkEngine instance @@ -47,12 +47,12 @@ class Engine /** * @brief Using an Inference Request to initialize the inference Engine. */ - Engine(InferenceEngine::InferRequest::Ptr &); + Engine(ov::InferRequest&); /** * @brief Get the inference request this instance holds. * @return The inference request this instance holds. */ - inline InferenceEngine::InferRequest::Ptr & getRequest() + inline ov::InferRequest& getRequest() { return request_; } @@ -61,15 +61,15 @@ class Engine * @param[in] callbackToSet A lambda function as callback function. * The callback function will be called when request is finished. 
*/ - template - void setCompletionCallback(const T & callbackToSet) + template + void setCompletionCallback(const T& callbackToSet) { - request_->SetCompletionCallback(callbackToSet); + request_.set_callback(callbackToSet); } private: - InferenceEngine::InferRequest::Ptr request_ = nullptr; + ov::InferRequest request_; }; } // namespace Engines -#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp new file mode 100644 index 00000000..58ccaa76 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp @@ -0,0 +1,57 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for NetworkEngine class + * @file engine.h + */ +#ifndef OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ +#define OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ + +#pragma once + +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino/openvino.hpp" + +namespace Engines +{ +/** + * @class EngineManager + * @brief This class is used to create and manage Inference engines. + */ +class EngineManager +{ +public: + /** + * @brief Create OpenVINO instance by given Engine Name and Network. + * @return The shared pointer of created Engine instance. + */ + std::shared_ptr createEngine(const std::string&, const std::shared_ptr&); + +private: +#if (defined(USE_OLD_E_PLUGIN_API)) + std::map plugins_for_devices_; + std::unique_ptr makePluginByName(const std::string& device_name, + const std::string& custom_cpu_library_message, + const std::string& custom_cldnn_message, + bool performance_message); + std::shared_ptr createEngine_beforeV2019R2(const std::string&, const std::shared_ptr&); +#endif + + std::shared_ptr createEngine_V2022(const std::string&, const std::shared_ptr&); +}; +} // namespace Engines + +#endif // OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp similarity index 75% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp index 130041a5..9b9cfaeb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
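Before the next header: the Engine and EngineManager declarations above move the completion callback to ov::InferRequest::set_callback, which receives a std::exception_ptr instead of the old status codes. A hedged sketch of arming it (the request is assumed to come from an engine created by EngineManager::createEngine):

    #include <exception>
    #include "openvino/openvino.hpp"
    #include "openvino_wrapper_lib/slog.hpp"

    void armCallback(ov::InferRequest& request)
    {
      request.set_callback([](std::exception_ptr ex) {
        if (ex) {
          try {
            std::rethrow_exception(ex);
          } catch (const std::exception& e) {
            slog::err << "Async inference failed: " << e.what() << slog::endl;
          }
        }
      });
      request.start_async();  // the callback fires when this request completes
    }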
@@ -16,26 +16,25 @@ * @brief A header file with declaration for AgeGenderDetection Class * @file age_gender_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ -#include -#include +#include +#include #include #include #include - -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" namespace Outputs { class BaseOutput; } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class AgeGenderResult * @brief Class for the age and gender detection result. */ class AgeGenderResult : public Result { public: - explicit AgeGenderResult(const cv::Rect & location); + explicit AgeGenderResult(const cv::Rect& location); /** * @brief Get the age of the detected person from the result. * @return The predicted age. @@ -75,7 +74,7 @@ class AgeGenderResult : public Result class AgeGenderDetection : public BaseInference { public: - using Result = dynamic_vino_lib::AgeGenderResult; + using Result = openvino_wrapper_lib::AgeGenderResult; AgeGenderDetection(); ~AgeGenderDetection() override; /** @@ -90,7 +89,7 @@ class AgeGenderDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat & frame, const cv::Rect &) override; + bool enqueue(const cv::Mat& frame, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -113,7 +112,7 @@ class AgeGenderDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -123,15 +122,14 @@ class AgeGenderDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic.
*/ - void observeOutput(const std::shared_ptr & output) override; + void observeOutput(const std::shared_ptr& output) override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp new file mode 100644 index 00000000..e8d04ff1 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp @@ -0,0 +1,194 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for BaseFilter Class + * @file base_filter.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ + +#include +#include +#include +#include +#include "openvino_wrapper_lib/inferences/base_inference.hpp" + +namespace openvino_wrapper_lib +{ + +/** + * @class BaseFilter + * @brief Base class for result filter. + */ +class BaseFilter +{ +public: + BaseFilter(); + /** + * @brief Initiate a result filter. + */ + virtual void init() = 0; + + /** + * @brief Get the filtered results' ROIs. + * @return The filtered ROIs. + */ + virtual std::vector getFilteredLocations() = 0; + + /** + * @brief Check if the filter conditions are valid for filtering. + * @param[in] Filter conditions. + * @return true if some of the conditions are valid, otherwise false. + */ + bool isValidFilterConditions(const std::string&); + + /** + * @brief Accept the filter conditions for filtering. + * @param[in] Filter conditions. + */ + void acceptFilterConditions(const std::string&); + + /** + * @brief Decide whether the input string is a relational operator or not. + * @param[in] A string to be decided. + * @return True if the input string is a relational operator, false if not. + */ + bool isRelationOperator(const std::string&); + + /** + * @brief Decide whether the input string is a logic operator or not. + * @param[in] A string to be decided. + * @return True if the input string is a logic operator, false if not. + */ + bool isLogicOperator(const std::string&); + + /** + * @brief Decide whether an operator has a higher priority than another. + * @param[in] The two operators. + * @return True if the first operator has higher priority, false if not. + */ + bool isPriorTo(const std::string&, const std::string&); + + /** + * @brief Convert the input bool variable to a string type. + * @param[in] A bool type to be converted. + * @return A converted string result. + */ + std::string boolToStr(bool); + + /** + * @brief Convert the input string variable to a bool type. + * @param[in] A string type to be converted. + * @return A converted bool result. + */ + bool strToBool(const std::string&); + + /** + * @brief Get the filter conditions in the suffix order. + * @return A vector with suffix-order filter conditions. + */ + const std::vector& getSuffixConditions() const; + + /** + * @brief Do logic operation with the given bool values and the operator. + * @param[in] A bool string, a logic operator, the other bool string. + * @return The logic operation result. + */ + bool logicOperation(const std::string&, const std::string&, const std::string&); + + /** + * @brief Compare two strings with a given relational operator. + * @param[in] A string, a relational operator, the other string. + * @return True if valid, false if not. + */ + static bool stringCompare(const std::string&, const std::string&, const std::string&); + + /** + * @brief Compare two floats with a given relational operator. + * @param[in] A float number, a relational operator, the other float number. + * @return True if valid, false if not. + */ + static bool floatCompare(float, const std::string&, float); + + /** + * @brief Convert a string into a float number. + * @param[in] A string to be converted. + * @return The converted float number, 0 if string is invalid. + */ + static float stringToFloat(const std::string&); + +/** + * @brief A macro to decide whether a given result satisfies the filter condition. + * @param[in] A key to function mapping, a given result. + * @return True if valid, false if not. + */ +#define ISVALIDRESULT(key_to_function, result) \ + { \ + std::vector suffix_conditons = getSuffixConditions(); \ + std::stack result_stack; \ + for (auto elem : suffix_conditons) { \ + if (!isRelationOperator(elem) && !isLogicOperator(elem)) { \ + result_stack.push(elem); \ + } else { \ + try { \ + std::string str1 = result_stack.top(); \ + result_stack.pop(); \ + std::string str2 = result_stack.top(); \ + result_stack.pop(); \ + if (key_to_function.count(str2)) { \ + result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \ + } else { \ + result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \ + } \ + } catch (...) { \ + slog::err << "Invalid filter conditions format!" << slog::endl; \ + } \ + } \ + } \ + if (result_stack.empty()) { \ + return true; \ + } \ + return strToBool(result_stack.top()); \ + } + +private: + /** + * @brief Parse the filter conditions and store them into a vector. + * @param[in] A string form filter conditions. + * @return The vector form filter conditions. + */ + std::vector split(const std::string& filter_conditions); + + /** + * @brief Convert the infix expression into suffix expression. + * @param[in] The infix form filter conditions. + */ + void infixToSuffix(std::vector& infix_conditions); + + /** + * @brief Strip the extra space in a string. + * @param[in] A string to be stripped. + * @return The stripped string.
+ */ + std::string strip(const std::string& str); + + std::string striped_conditions_ = ""; + std::vector suffix_conditons_; + std::vector relation_operators_ = { "==", "!=", "<=", ">=", "<", ">" }; + std::vector logic_operators_ = { "&&", "||" }; +}; +} // namespace openvino_wrapper_lib + +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp similarity index 72% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp index 8e830764..981f4185 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,17 +16,16 @@ * @brief A header file with declaration for BaseInference Class * @file base_inference.h */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ #include #include #include - -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" namespace Outputs @@ -40,16 +39,14 @@ class BaseOutput; * @param[in] scale_factor Scale factor for loading. * @param[in] batch_index Indicates the batch index for the frame. 
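Stepping out of the hunk briefly: a hedged sketch of how the BaseFilter interface above is driven. The concrete subclass behind `filter` is hypothetical, and the condition string only uses the relational and logic operators registered in relation_operators_ and logic_operators_:

    #include <memory>
    #include <string>
    #include "openvino_wrapper_lib/inferences/base_filter.hpp"

    void applyConditions(const std::shared_ptr<openvino_wrapper_lib::BaseFilter>& filter)
    {
      std::string conditions = "label == person && confidence >= 0.8";
      if (filter->isValidFilterConditions(conditions)) {
        filter->acceptFilterConditions(conditions);  // parsed and stored in suffix (postfix) order
        auto rois = filter->getFilteredLocations();  // only the ROIs satisfying the conditions
        (void)rois;
      }
    }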
*/ -template <typename T> -void matU8ToBlob( - const cv::Mat & orig_image, InferenceEngine::Blob::Ptr & blob, - float scale_factor = 1.0, int batch_index = 0) +template <typename T> +void matU8ToBlob(const cv::Mat& orig_image, ov::Tensor& input_tensor, float scale_factor = 1.0, int batch_index = 0) { - InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); + ov::Shape blob_size = input_tensor.get_shape(); const size_t width = blob_size[3]; const size_t height = blob_size[2]; const size_t channels = blob_size[1]; - T * blob_data = blob->buffer().as<T *>(); + T* blob_data = input_tensor.data<T>(); cv::Mat resized_image(orig_image); if (width != orig_image.size().width || height != orig_image.size().height) { @@ -61,13 +58,13 @@ void matU8ToBlob( for (size_t h = 0; h < height; h++) { for (size_t w = 0; w < width; w++) { blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at<cv::Vec3b>(h, w)[c] * scale_factor; + resized_image.at<cv::Vec3b>(h, w)[c] * scale_factor; } } } } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class Result @@ -77,7 +74,7 @@ class Result { public: friend class BaseInference; - explicit Result(const cv::Rect & location); + explicit Result(const cv::Rect& location); inline const cv::Rect getLocation() const { return location_; } @@ -127,7 +124,7 @@ class BaseInference return max_batch_size_; } - inline void setMaxBatchSize(int max) + inline void setMaxBatchSize(int max) { max_batch_size_ = max; } @@ -140,7 +137,7 @@ class BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - virtual bool enqueue(const cv::Mat & frame, const cv::Rect & input_frame_loc) = 0; + virtual bool enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) = 0; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -148,7 +145,7 @@ virtual bool submitRequest(); virtual bool SynchronousRequest(); - virtual void observeOutput(const std::shared_ptr<Outputs::BaseOutput> & output) = 0; + virtual void observeOutput(const std::shared_ptr<Outputs::BaseOutput>& output) = 0; /** * @brief This function will fetch the results of the previous inference and @@ -166,36 +163,34 @@ * to the frame generated by the input device. * @param[in] idx The index of the result. */ - virtual const dynamic_vino_lib::Result * getLocationResult(int idx) const = 0; + virtual const openvino_wrapper_lib::Result* getLocationResult(int idx) const = 0; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ virtual const std::string getName() const = 0; - virtual const std::vector<std::string> getFilteredROIs( - const std::string filter_conditions) const = 0; + virtual const std::vector<std::string> getFilteredROIs(const std::string filter_conditions) const = 0; void addCandidatedModel(std::shared_ptr<Models::BaseModel> model); protected: /** - * @brief Enqueue the fram into the input blob of the target calculation - * device. Check OpenVINO document for detailed information. - * @return Whether this operation is successful. - */ - template <typename T> - bool enqueue( - const cv::Mat & frame, const cv::Rect &, float scale_factor, int batch_index, - const std::string & input_name) + * @brief Enqueue the frame into the input tensor of the target calculation + * device. Check the OpenVINO documentation for detailed information. + * @return Whether this operation is successful.
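+ *
+ * Editorial sketch (hypothetical subclass; the accessor name getInputName() is
+ * an assumption, not fixed by this header): a concrete inference typically
+ * forwards to this helper with its model's input tensor name and scale factor:
+ * @code
+ *   bool MyDetection::enqueue(const cv::Mat& frame, const cv::Rect& roi)
+ *   {
+ *     // scale 1.0 for U8 models; the tensor name comes from the loaded model
+ *     return BaseInference::enqueue<uint8_t>(frame, roi, 1.0, 0,
+ *                                            valid_model_->getInputName());
+ *   }
+ * @endcode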
+ */ + template <typename T> + bool enqueue(const cv::Mat& frame, const cv::Rect&, float scale_factor, int batch_index, + const std::string& input_name) { if (enqueued_frames_ == max_batch_size_) { - slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ << - ") processed by inference" << slog::endl; + slog::warn << "Number of " << getName() << " input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; return false; } - InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); - matU8ToBlob<T>(frame, input_blob, scale_factor, batch_index); + ov::Tensor input_tensor = engine_->getRequest().get_tensor(input_name); + matU8ToBlob<T>(frame, input_tensor, scale_factor, batch_index); enqueued_frames_ += 1; return true; } @@ -209,6 +204,6 @@ class BaseInference int enqueued_frames_ = 0; bool results_fetched_ = false; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp similarity index 77% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp index 17fd90f0..8be3c057 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ * @brief A header file with declaration for BaseReidentification Class * @file base_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ #include #include #include @@ -26,7 +26,7 @@ #include // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class Tracker @@ -41,7 +41,7 @@ class Tracker * @param[in] feature The new detected track feature. * @return The detected track ID. */ - int processNewTrack(const std::vector<float> & feature); + int processNewTrack(const std::vector<float>& feature); private: /** @@ -50,13 +50,13 @@ * @param[in] most similar track's ID to be recorded. * @return similarity with the most similar track. */ - double findMostSimilarTrack(const std::vector<float> & feature, int & most_similar_id); + double findMostSimilarTrack(const std::vector<float>& feature, int& most_similar_id); /** * @brief Update the matched track's feature by the new track. * @param[in] track_id The matched track ID. * @param[in] feature The matched track's feature */ - void updateMatchTrack(int track_id, const std::vector<float> & feature); + void updateMatchTrack(int track_id, const std::vector<float>& feature); /** * @brief Remove the earliest track from the recorded tracks. */ @@ -66,13 +66,12 @@ * @param[in] feature A track's feature. * @return The newly added track's ID.
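 *
 * Editorial note (a sketch of the standard formula, not the verbatim
 * implementation): calcSimilarity() below computes cosine similarity,
 * sim(a, b) = dot(a, b) / (|a| * |b|), roughly:
 * @code
 *   double dot = 0, na = 0, nb = 0;
 *   for (size_t i = 0; i < a.size(); i++) {
 *     dot += a[i] * b[i];   // accumulate dot product
 *     na  += a[i] * a[i];   // squared norm of a
 *     nb  += b[i] * b[i];   // squared norm of b
 *   }
 *   double sim = dot / (std::sqrt(na) * std::sqrt(nb));
 * @endcode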
*/ - int addNewTrack(const std::vector<float> & feature); + int addNewTrack(const std::vector<float>& feature); /** * @brief Calculate the cosine similarity between two features. * @return The similarity result. */ - double calcSimilarity( - const std::vector<float> & feature_a, const std::vector<float> & feature_b); + double calcSimilarity(const std::vector<float>& feature_a, const std::vector<float>& feature_b); /** * @brief get the current millisecond count since epoch. * @return millisecond count since epoch. @@ -96,5 +95,5 @@ class Tracker std::unordered_map<int, Track> recorded_tracks_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp similarity index 76% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp index a493c3f8..130c1dbd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,24 +15,24 @@ * @brief A header file with declaration for EmotionsDetection Class * @file emotions_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ #include #include #include -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" +#include "openvino_wrapper_lib/models/emotion_detection_model.hpp" namespace Outputs { class BaseOuput; } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class EmotionsResult * @brief Class for storing and processing emotion detection result. */ class EmotionsResult : public Result { public: friend class EmotionsDetection; - explicit EmotionsResult(const cv::Rect & location); + explicit EmotionsResult(const cv::Rect& location); /** * @brief Get the emotion label of the detected person. * @return The predicted emotion label. */ @@ -64,7 +64,7 @@ class EmotionsResult : public Result class EmotionsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::EmotionsResult; + using Result = openvino_wrapper_lib::EmotionsResult; EmotionsDetection(); ~EmotionsDetection() override; /** @@ -79,7 +79,7 @@ * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful.
@@ -102,7 +102,7 @@ class EmotionsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -112,19 +112,18 @@ class EmotionsDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic. */ - void observeOutput(const std::shared_ptr & output) override; + void observeOutput(const std::shared_ptr& output) override; std::vector getResults() { return results_; } - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp similarity index 70% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp index f43a9f2e..c0cec975 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
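//
// [Editorial aside, hedged sketch -- not part of the patch] The inference
// classes in this section share the BaseInference lifecycle: enqueue ->
// submitRequest -> fetchResults -> read the result buffer. Roughly, for the
// emotions inference above (variable names are illustrative):
//
//   openvino_wrapper_lib::EmotionsDetection emotions;
//   emotions.loadNetwork(model);            // a loaded emotion detection model
//   emotions.enqueue(face_mat, face_roi);   // buffer one cropped face
//   emotions.submitRequest();               // run inference on the buffer
//   emotions.fetchResults();                // fill the result array
//   auto results = emotions.getResults();   // EmotionsResult entries
//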
@@ -16,8 +16,8 @@ * @brief A header file with declaration for FaceDetection Class * @file face_detection.h */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ #include #include @@ -28,14 +28,13 @@ #include #include -#include "dynamic_vino_lib/models/face_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/face_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class FaceDetectionResult @@ -44,7 +43,7 @@ namespace dynamic_vino_lib class FaceDetectionResult : public ObjectDetectionResult { public: - explicit FaceDetectionResult(const cv::Rect & location); + explicit FaceDetectionResult(const cv::Rect& location); }; /** @@ -56,5 +55,5 @@ class FaceDetection : public ObjectDetection public: explicit FaceDetection(bool, double); }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp similarity index 71% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp index 3785b371..93aa98d3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,20 +16,20 @@ * @brief A header file with declaration for FaceReidentification Class * @file face_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/face_reidentification_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/face_reidentification_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class FaceReidentificationResult @@ -39,8 +39,11 @@ class FaceReidentificationResult : public Result { public: friend class FaceReidentification; - explicit FaceReidentificationResult(const cv::Rect & location); - std::string getFaceID() const {return face_id_;} + explicit FaceReidentificationResult(const cv::Rect& location); + std::string getFaceID() const + { + return face_id_; + } private: std::string face_id_ = "No.#"; @@ -53,7 +56,7 @@ class FaceReidentificationResult : public Result class FaceReidentification : public BaseInference { public: - using Result = dynamic_vino_lib::FaceReidentificationResult; + using Result = openvino_wrapper_lib::FaceReidentificationResult; explicit FaceReidentification(double); ~FaceReidentification() override; /** @@ -68,7 +71,7 @@ class FaceReidentification : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -91,24 +94,23 @@ class FaceReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed reidentification result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector<std::string> getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector<std::string> getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr<Models::FaceReidentificationModel> valid_model_; std::vector<Result> results_; - std::shared_ptr<dynamic_vino_lib::Tracker> face_tracker_; + std::shared_ptr<openvino_wrapper_lib::Tracker> face_tracker_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp similarity index 77% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp index 06990cc5..7dccf775 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,20 +16,20 @@ * @brief A header file with declaration for HeadPoseDetection Class * @file head_pose_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ #include #include #include -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class HeadPoseResult @@ -39,7 +39,7 @@ class HeadPoseResult { public: friend class HeadPoseDetection; - explicit HeadPoseResult(const cv::Rect & location); + explicit HeadPoseResult(const cv::Rect& location); /** * @brief Get the yaw angle of the head pose. * @return The yaw value. */ @@ -78,7 +78,7 @@ class HeadPoseResult class HeadPoseDetection : public BaseInference { public: - using Result = dynamic_vino_lib::HeadPoseResult; + using Result = openvino_wrapper_lib::HeadPoseResult; HeadPoseDetection(); ~HeadPoseDetection() override; /** @@ -93,7 +93,7 @@ class HeadPoseDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat & frame, const cv::Rect &) override; + bool enqueue(const cv::Mat& frame, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -116,7 +116,7 @@ class HeadPoseDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result.
*/ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -126,18 +126,17 @@ class HeadPoseDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output) override; + void observeOutput(const std::shared_ptr& output) override; std::vector getResults() { return results_; } - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp new file mode 100644 index 00000000..fddc197c --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp @@ -0,0 +1,102 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of Inference Manager class + * @file inference_manager.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/pipeline.hpp" + +/** + * @class InferenceManager + * @brief This class manages inference resources. + */ +class InferenceManager +{ +public: + /** + * @brief Get the singleton instance of InferenceManager class. + * The instance will be created when first call. + * @return The reference of InferenceManager instance. 
+ */ + static InferenceManager& getInstance() + { + static InferenceManager manager_; + return manager_; + } + + std::shared_ptr<Pipeline> createPipeline(const Params::ParamManager::PipelineRawData& params); + void removePipeline(const std::string& name); + InferenceManager& updatePipeline(const std::string& name, const Params::ParamManager::PipelineRawData& params); + + void runAll(); + void stopAll(); + void joinAll(); + + enum PipelineState + { + PipelineState_ThreadNotCreated, + PipelineState_ThreadStopped, + PipelineState_ThreadRunning, + PipelineState_Error + }; + struct PipelineData + { + Params::ParamManager::PipelineRawData params; + std::shared_ptr<Pipeline> pipeline; + std::vector<std::shared_ptr<rclcpp::Node>> spin_nodes; + std::shared_ptr<std::thread> thread; + PipelineState state; + }; + +private: + InferenceManager() + { + } + InferenceManager(InferenceManager const&); + void operator=(InferenceManager const&); + void threadPipeline(const char* name); + std::map<std::string, std::shared_ptr<Input::BaseInputDevice>> + parseInputDevice(const Params::ParamManager::PipelineRawData& params); + std::map<std::string, std::shared_ptr<Outputs::BaseOutput>> + parseOutput(const Params::ParamManager::PipelineRawData& params); + std::map<std::string, std::shared_ptr<openvino_wrapper_lib::BaseInference>> + parseInference(const Params::ParamManager::PipelineRawData& params); + std::shared_ptr<openvino_wrapper_lib::BaseInference> + createFaceDetection(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr<openvino_wrapper_lib::BaseInference> + createAgeGenderRecognition(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr<openvino_wrapper_lib::BaseInference> + createEmotionRecognition(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr<openvino_wrapper_lib::BaseInference> + createHeadPoseEstimation(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr<openvino_wrapper_lib::BaseInference> + createObjectDetection(const Params::ParamManager::InferenceParams& infer); + + std::map<std::string, PipelineData> pipelines_; +}; + +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp similarity index 76% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp index e706dd8c..e2675fde 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
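//
// [Editorial aside, hedged] A hypothetical use of the InferenceManager
// singleton declared above; obtaining the PipelineRawData via ParamManager is
// assumed and not shown here:
//
//   auto& manager = InferenceManager::getInstance();
//   auto pipeline = manager.createPipeline(params);  // params: PipelineRawData
//   manager.runAll();    // start every managed pipeline thread
//   manager.joinAll();   // block until all pipeline threads finish
//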
@@ -16,19 +16,18 @@ * @brief A header file with declaration for LandmarksDetection Class * @file landmarks_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/landmarks_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class LandmarksDetectionResult @@ -38,7 +37,7 @@ class LandmarksDetectionResult : public Result { public: friend class LandmarksDetection; - explicit LandmarksDetectionResult(const cv::Rect & location); + explicit LandmarksDetectionResult(const cv::Rect& location); std::vector getLandmarks() const { return landmark_points_; @@ -54,7 +53,7 @@ class LandmarksDetectionResult : public Result class LandmarksDetection : public BaseInference { public: - using Result = dynamic_vino_lib::LandmarksDetectionResult; + using Result = openvino_wrapper_lib::LandmarksDetectionResult; LandmarksDetection(); ~LandmarksDetection() override; /** @@ -69,7 +68,7 @@ class LandmarksDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -92,23 +91,22 @@ class LandmarksDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector<std::string> getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector<std::string> getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr<Models::LandmarksDetectionModel> valid_model_; std::vector<Result> results_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp new file mode 100644 index 00000000..290fe345 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp @@ -0,0 +1,186 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for LicensePlateDetection Class + * @file license_plate_detection.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#include +#include +#include +#include +#include "openvino_wrapper_lib/models/license_plate_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace openvino_wrapper_lib +{ +/** + * @class LicensePlateDetectionResult + * @brief Class for storing and processing license plate detection result. + */ +class LicensePlateDetectionResult : public Result +{ +public: + friend class LicensePlateDetection; + explicit LicensePlateDetectionResult(const cv::Rect& location); + std::string getLicense() const + { + return license_; + } + +private: + std::string license_ = ""; +}; +/** + * @class LicensePlateDetection + * @brief Class to load license plate detection model and perform detection. + */ +class LicensePlateDetection : public BaseInference +{ +public: + using Result = openvino_wrapper_lib::LicensePlateDetectionResult; + LicensePlateDetection(); + ~LicensePlateDetection() override; + /** + * @brief Load the license plate detection model. + */ + void loadNetwork(std::shared_ptr<Models::LicensePlateDetectionModel>); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not inferred yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat&, const cv::Rect&) override; + /** + * @brief Set the sequence input blob + */ + void fillSeqBlob(); + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful.
+ */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + void observeOutput(const std::shared_ptr<Outputs::BaseOutput>& output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + const std::vector<std::string> getFilteredROIs(const std::string filter_conditions) const override; + +private: + std::shared_ptr<Models::LicensePlateDetectionModel> valid_model_; + std::vector<Result> results_; + const std::vector<std::string> licenses_ = { "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "<Anhui>", + "<Beijing>", + "<Chongqing>", + "<Fujian>", + "<Gansu>", + "<Guangdong>", + "<Guangxi>", + "<Guizhou>", + "<Hainan>", + "<Hebei>", + "<Heilongjiang>", + "<Henan>", + "<HongKong>", + "<Hubei>", + "<Hunan>", + "<InnerMongolia>", + "<Jiangsu>", + "<Jiangxi>", + "<Jilin>", + "<Liaoning>", + "<Macau>", + "<Ningxia>", + "<Qinghai>", + "<Shaanxi>", + "<Shandong>", + "<Shanghai>", + "<Shanxi>", + "<Sichuan>", + "<Tianjin>", + "<Tibet>", + "<Xinjiang>", + "<Yunnan>", + "<Zhejiang>", + "<police>", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z" }; +}; +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp index ac9304c0..51aaa248 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
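//
// [Editorial aside, hedged] A sketch of how a licenses_-style vocabulary is
// typically used to decode the LPR model's output sequence; the output layout
// and the -1 terminator are assumptions, not taken from this patch:
//
//   std::string license;
//   for (size_t i = 0; i < seq_len; i++) {
//     int idx = static_cast<int>(output_data[i]);
//     if (idx < 0) break;          // end-of-sequence marker
//     license += licenses_[idx];   // digit, region tag, or letter
//   }
//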
@@ -16,8 +16,8 @@ * @brief A header file with declaration for ObjectDetection Class * @file object_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ #include #include #include @@ -27,14 +27,13 @@ #include #include #include -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_filter.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_filter.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class ObjectDetectionResult @@ -44,13 +43,13 @@ class ObjectDetectionResult : public Result { public: friend class ObjectDetection; - explicit ObjectDetectionResult(const cv::Rect & location); + explicit ObjectDetectionResult(const cv::Rect& location); std::string getLabel() const { return label_; } - void setLabel(const std::string & label) + void setLabel(const std::string& label) { label_ = label; } @@ -63,12 +62,12 @@ class ObjectDetectionResult : public Result return confidence_; } - void setConfidence(const float & con) + void setConfidence(const float& con) { confidence_ = con; } - bool operator<(const ObjectDetectionResult & s2) const + bool operator<(const ObjectDetectionResult& s2) const { return this->confidence_ > s2.confidence_; } @@ -85,7 +84,7 @@ class ObjectDetectionResult : public Result class ObjectDetectionResultFilter : public BaseFilter { public: - using Result = dynamic_vino_lib::ObjectDetectionResult; + using Result = openvino_wrapper_lib::ObjectDetectionResult; ObjectDetectionResultFilter(); @@ -97,7 +96,7 @@ class ObjectDetectionResultFilter : public BaseFilter * @brief Set the object detection results into filter. * @param[in] The object detection results. */ - void acceptResults(const std::vector & results); + void acceptResults(const std::vector& results); /** * @brief Get the filtered results' ROIs. * @return The filtered ROIs. @@ -110,27 +109,22 @@ class ObjectDetectionResultFilter : public BaseFilter * @param[in] Result to be decided, filter operator, target label value. * @return True if valid, false if not. */ - static bool isValidLabel( - const Result & result, - const std::string & op, const std::string & target); + static bool isValidLabel(const Result& result, const std::string& op, const std::string& target); /** * @brief Decide whether a result is valid for confidence filter condition. * @param[in] Result to be decided, filter operator, target confidence value. * @return True if valid, false if not. */ - static bool isValidConfidence( - const Result & result, - const std::string & op, const std::string & target); + static bool isValidConfidence(const Result& result, const std::string& op, const std::string& target); /** * @brief Decide whether a result is valid. * @param[in] Result to be decided. * @return True if valid, false if not. 
*/ - bool isValidResult(const Result & result); + bool isValidResult(const Result& result); - std::map key_to_function_; + std::map key_to_function_; std::vector results_; }; @@ -141,8 +135,8 @@ class ObjectDetectionResultFilter : public BaseFilter class ObjectDetection : public BaseInference { public: - using Result = dynamic_vino_lib::ObjectDetectionResult; - using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; + using Result = openvino_wrapper_lib::ObjectDetectionResult; + using Filter = openvino_wrapper_lib::ObjectDetectionResultFilter; explicit ObjectDetection(bool, double); ~ObjectDetection() override; /** @@ -157,7 +151,7 @@ class ObjectDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief This function will fetch the results of the previous inference and @@ -176,25 +170,24 @@ class ObjectDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const Result * getLocationResult(int idx) const override; + const Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; /** * @brief Calculate the IoU ratio for the given rectangles. * @return IoU Ratio of the given rectangles. */ - static double calcIoU(const cv::Rect & box_1, const cv::Rect & box_2); + static double calcIoU(const cv::Rect& box_1, const cv::Rect& box_2); private: std::shared_ptr valid_model_; @@ -207,5 +200,5 @@ class ObjectDetection : public BaseInference double show_output_thresh_ = 0; bool enable_roi_constraint_ = false; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp similarity index 67% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp index 68f90a1c..1a84d2cf 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
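//
// [Editorial aside] ObjectDetection::calcIoU above is the standard
// intersection-over-union ratio; a minimal sketch (not the verbatim body):
//
//   double calcIoU(const cv::Rect& b1, const cv::Rect& b2)
//   {
//     double inter = (b1 & b2).area();             // cv::Rect intersection
//     double uni = b1.area() + b2.area() - inter;  // union without double count
//     return uni > 0 ? inter / uni : 0.0;
//   }
//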
@@ -16,8 +16,8 @@ * @brief A header file with declaration for ObjectSegmentation Class * @file object_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ #include #include #include @@ -25,13 +25,13 @@ #include #include #include -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class ObjectSegmentationResult @@ -41,7 +41,7 @@ class ObjectSegmentationResult : public Result { public: friend class ObjectSegmentation; - explicit ObjectSegmentationResult(const cv::Rect & location); + explicit ObjectSegmentationResult(const cv::Rect& location); std::string getLabel() const { return label_; @@ -71,7 +71,7 @@ class ObjectSegmentationResult : public Result class ObjectSegmentation : public BaseInference { public: - using Result = dynamic_vino_lib::ObjectSegmentationResult; + using Result = openvino_wrapper_lib::ObjectSegmentationResult; explicit ObjectSegmentation(double); ~ObjectSegmentation() override; /** @@ -86,10 +86,10 @@ class ObjectSegmentation : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; - //Deprecated!! - bool enqueue_for_one_input(const cv::Mat &, const cv::Rect &); + // Deprecated!! + bool enqueue_for_one_input(const cv::Mat&, const cv::Rect&); /** * @brief Start inference for all buffered frames. @@ -113,19 +113,18 @@ class ObjectSegmentation : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; @@ -134,13 +133,12 @@ class ObjectSegmentation : public BaseInference int height_ = 0; double show_output_thresh_ = 0; - std::vector colors_ = { - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, - {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, - {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, - {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, - {81, 0, 81} - }; + std::vector colors_ = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_instance.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_instance.hpp new file mode 100644 index 00000000..42f070c1 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_instance.hpp @@ -0,0 +1,149 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_INSTANCE_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_INSTANCE_HPP_ +#include +#include +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_instance_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace openvino_wrapper_lib +{ +/** + * @class ObjectSegmentationInstanceResult + * @brief Class for storing and processing object segmentation result. + */ +class ObjectSegmentationInstanceResult : public Result +{ +public: + friend class ObjectSegmentationInstance; + explicit ObjectSegmentationInstanceResult(const cv::Rect& location); + inline std::string getLabel() const + { + return label_; + } + inline void setLabel(const std::string& label) + { + label_ = label; + } + /** + * @brief Get the confidence that the detected area is a face. + * @return The confidence value. 
+ */ + inline float getConfidence() const + { + return confidence_; + } + inline void setConfidence(float conf) + { + confidence_ = conf; + } + inline cv::Mat getMask() const + { + return mask_; + } + inline void setMask(const cv::Mat& mask) + { + mask_ = mask; + } + +private: + std::string label_ = ""; + float confidence_ = -1; + cv::Mat mask_; +}; +/** + * @class ObjectSegmentation + * @brief Class to load object segmentation model and perform object segmentation. + */ +class ObjectSegmentationInstance : public BaseInference +{ +public: + using Result = openvino_wrapper_lib::ObjectSegmentationInstanceResult; + explicit ObjectSegmentationInstance(double); + ~ObjectSegmentationInstance() override; + /** + * @brief Load the object segmentation model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat&, const cv::Rect&) override; + + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + void observeOutput(const std::shared_ptr& output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. 
+ */ + const std::string getName() const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + int width_ = 0; + int height_ = 0; + double show_output_thresh_ = 0; + + std::vector colors_ = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; +}; +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp new file mode 100644 index 00000000..8346bd6f --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp @@ -0,0 +1,144 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for ObjectSegmentation Class + * @file object_detection.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_MASKRCNN_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_MASKRCNN_HPP_ +#include +#include +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace openvino_wrapper_lib +{ +/** + * @class ObjectSegmentationMaskrcnnResult + * @brief Class for storing and processing object segmentation result. + */ +class ObjectSegmentationMaskrcnnResult : public Result +{ +public: + friend class ObjectSegmentationMaskrcnn; + explicit ObjectSegmentationMaskrcnnResult(const cv::Rect& location); + std::string getLabel() const + { + return label_; + } + /** + * @brief Get the confidence that the detected area is a face. + * @return The confidence value. + */ + float getConfidence() const + { + return confidence_; + } + cv::Mat getMask() const + { + return mask_; + } + +private: + std::string label_ = ""; + float confidence_ = -1; + cv::Mat mask_; +}; +/** + * @class ObjectSegmentation + * @brief Class to load object segmentation model and perform object segmentation. + */ +class ObjectSegmentationMaskrcnn : public BaseInference +{ +public: + using Result = openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult; + explicit ObjectSegmentationMaskrcnn(double); + ~ObjectSegmentationMaskrcnn() override; + /** + * @brief Load the object segmentation model. 
+ */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat&, const cv::Rect&) override; + + // Deprecated!! + bool enqueue_for_one_input(const cv::Mat&, const cv::Rect&); + + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + void observeOutput(const std::shared_ptr& output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + int width_ = 0; + int height_ = 0; + double show_output_thresh_ = 0; + + std::vector colors_ = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; +}; +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp similarity index 72% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp index fbea1e8c..0d1ce1ce 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
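//
// [Editorial aside, hedged] Mask R-CNN style results above carry a small
// per-ROI mask; a common post-processing sketch (the mask scaling, threshold,
// and color choice are assumptions, not the verbatim implementation):
//
//   cv::Mat mask_roi;
//   cv::resize(raw_mask, mask_roi, result.getLocation().size()); // fit the ROI
//   cv::Mat bin = mask_roi > 0.5;                                // binarize
//   frame(result.getLocation()).setTo(colors_[label_id % colors_.size()], bin);
//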
@@ -16,19 +16,19 @@ * @brief A header file with declaration for PersonAttribsDetection Class * @file person_attribs_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class PersonAttribsDetectionResult @@ -38,7 +38,7 @@ class PersonAttribsDetectionResult : public Result { public: friend class PersonAttribsDetection; - explicit PersonAttribsDetectionResult(const cv::Rect & location); + explicit PersonAttribsDetectionResult(const cv::Rect& location); std::string getAttributes() const { @@ -71,7 +71,7 @@ class PersonAttribsDetectionResult : public Result class PersonAttribsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::PersonAttribsDetectionResult; + using Result = openvino_wrapper_lib::PersonAttribsDetectionResult; explicit PersonAttribsDetection(double); ~PersonAttribsDetection() override; /** @@ -86,7 +86,7 @@ class PersonAttribsDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -109,33 +109,26 @@ class PersonAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; double attribs_confidence_; - const std::vector net_attributes_ = { - "is male", - "has_bag", - "has_backpack" , - "has hat", - "has longsleeves", - "has longpants", - "has longhair", - "has coat_jacket"}; + const std::vector net_attributes_ = { "is male", "has_bag", "has_backpack", + "has hat", "has longsleeves", "has longpants", + "has longhair", "has coat_jacket" }; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp similarity index 71% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp index 2d47dc3e..96688fae 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,20 +16,19 @@ * @brief A header file with declaration for PersonReidentification Class * @file person_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class PersonReidentificationResult @@ -39,8 +38,11 @@ class PersonReidentificationResult : public Result { public: friend class PersonReidentification; - explicit PersonReidentificationResult(const cv::Rect & location); - std::string getPersonID() const {return person_id_;} + explicit PersonReidentificationResult(const cv::Rect& location); + std::string getPersonID() const + { + return person_id_; + } private: std::string person_id_ = "No.#"; @@ -52,7 +54,7 @@ class PersonReidentificationResult : public Result class PersonReidentification : public BaseInference { public: - using Result = dynamic_vino_lib::PersonReidentificationResult; + using Result = openvino_wrapper_lib::PersonReidentificationResult; explicit 
PersonReidentification(double); ~PersonReidentification() override; /** @@ -67,7 +69,7 @@ class PersonReidentification : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -90,24 +92,23 @@ class PersonReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; - std::shared_ptr person_tracker_; + std::shared_ptr person_tracker_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp similarity index 72% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp index 03ff1427..95cbcadb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
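getLocationResult() returns the base openvino_wrapper_lib::Result pointer for every inference type; class-specific fields such as PersonReidentificationResult::getPersonID() live on the derived type. A hedged sketch of recovering it (this assumes Result is polymorphic so dynamic_cast applies; if not, the pipeline's own output classes are the supported consumer):

```cpp
#include <iostream>
#include "openvino_wrapper_lib/inferences/person_reidentification.hpp"

// Hedged sketch: recovering the typed result behind the base Result pointer.
// Assumes openvino_wrapper_lib::Result has a virtual member, enabling dynamic_cast.
using Reid = openvino_wrapper_lib::PersonReidentification;

void print_ids(const Reid& reid)
{
  for (int i = 0; i < reid.getResultsLength(); ++i) {
    auto* typed = dynamic_cast<const Reid::Result*>(reid.getLocationResult(i));
    if (typed) {
      std::cout << "person: " << typed->getPersonID() << std::endl;
    }
  }
}
```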
@@ -16,19 +16,18 @@ * @brief A header file with declaration for VehicleAttribsDetection Class * @file vehicle_attribs_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class VehicleAttribsDetectionResult @@ -38,7 +37,7 @@ class VehicleAttribsDetectionResult : public Result { public: friend class VehicleAttribsDetection; - explicit VehicleAttribsDetectionResult(const cv::Rect & location); + explicit VehicleAttribsDetectionResult(const cv::Rect& location); std::string getColor() const { return color_; @@ -59,7 +58,7 @@ class VehicleAttribsDetectionResult : public Result class VehicleAttribsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::VehicleAttribsDetectionResult; + using Result = openvino_wrapper_lib::VehicleAttribsDetectionResult; VehicleAttribsDetection(); ~VehicleAttribsDetection() override; /** @@ -74,7 +73,7 @@ class VehicleAttribsDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -97,27 +96,24 @@ class VehicleAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; - const std::vector types_ = { - "car", "van", "truck", "bus"}; - const std::vector colors_ = { - "white", "gray", "yellow", "red", "green", "blue", "black"}; + const std::vector types_ = { "car", "van", "truck", "bus" }; + const std::vector colors_ = { "white", "gray", "yellow", "red", "green", "blue", "black" }; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp similarity index 85% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp index 695e7200..2420a096 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ * @brief A header file with declaration for BaseInput Class * @file base_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ #include #include @@ -25,7 +25,7 @@ #include #include #include -#include "dynamic_vino_lib/inputs/ros2_handler.hpp" +#include "openvino_wrapper_lib/inputs/ros2_handler.hpp" /** * @class BaseInputDevice @@ -59,12 +59,14 @@ class BaseInputDevice : public Ros2Handler * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - virtual bool read(cv::Mat * frame) = 0; - virtual bool readService(cv::Mat * frame, std::string config_path) + virtual bool read(cv::Mat* frame) = 0; + virtual bool readService(cv::Mat* frame, std::string config_path) { return true; } - virtual void config(const Config &) {} + virtual void config(const Config&) + { + } virtual ~BaseInputDevice() = default; /** * @brief Get the width of the frame read from input device. 
@@ -116,9 +118,9 @@ class BaseInputDevice : public Ros2Handler } private: - size_t width_ = 0; // 0 means using the original size - size_t height_ = 0; // 0 means using the original size + size_t width_ = 0; // 0 means using the original size + size_t height_ = 0; // 0 means using the original size bool is_init_ = false; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp similarity index 79% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp index 08874c49..c7591d23 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ * @brief A header file with declaration for Image class * @file file_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -32,7 +32,7 @@ namespace Input class Image : public BaseInputDevice { public: - explicit Image(const std::string &); + explicit Image(const std::string&); /** * @brief Read an image file from the file path. * @param[in] An image file path. @@ -52,9 +52,9 @@ class Image : public BaseInputDevice * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; - void config(const Config &) override; + void config(const Config&) override; private: cv::Mat image_; @@ -62,4 +62,4 @@ class Image : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp similarity index 78% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp index 196a934b..96df03ca 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
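BaseInputDevice (above) fixes the contract every input source implements: initialize() opens the source and read(cv::Mat*) delivers one frame, returning false when none is available. A minimal polling loop, sketched against the Image input declared just below (the file path is illustrative, not from this PR):

```cpp
#include <opencv2/opencv.hpp>
#include "openvino_wrapper_lib/inputs/image_input.hpp"

// Hedged sketch of the BaseInputDevice contract: initialize once, then poll read().
int main()
{
  Input::Image input("/root/jpg/car.jpg");  // illustrative path
  if (!input.initialize()) {
    return 1;
  }
  cv::Mat frame;
  while (input.read(&frame)) {
    // hand `frame` to the inference pipeline here
  }
  return 0;
}
```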
@@ -17,15 +17,15 @@ * @file image_topic.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ #include #include #include #include -#include "dynamic_vino_lib/utils/mutex_counter.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/utils/mutex_counter.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -39,7 +39,7 @@ class ImageTopic : public BaseInputDevice ImageTopic(rclcpp::Node::SharedPtr node = nullptr); bool initialize() override; bool initialize(size_t width, size_t height) override; - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: rclcpp::Subscription::SharedPtr sub_; @@ -51,4 +51,4 @@ class ImageTopic : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp index 02ffb3ce..03680624 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ * @file ip_camera.hpp */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -33,7 +33,9 @@ namespace Input class IpCamera : public BaseInputDevice { public: - explicit IpCamera(const std::string & ip_uri) : ip_uri_(ip_uri) {} + explicit IpCamera(const std::string& ip_uri) : ip_uri_(ip_uri) + { + } /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -50,11 +52,11 @@ class IpCamera : public BaseInputDevice * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. 
*/ - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: cv::VideoCapture cap; std::string ip_uri_; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp similarity index 83% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp index 3d399927..d04ed1d0 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ * @file realsense_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -48,7 +48,7 @@ class RealSenseCamera : public BaseInputDevice * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: void bypassFewFramesOnceInited(); @@ -61,4 +61,4 @@ class RealSenseCamera : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp similarity index 75% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp index 2b62c643..8d9de35c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,10 +17,10 @@ * @file realsense_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ -#include "dynamic_vino_lib/inputs/image_topic.hpp" +#include "openvino_wrapper_lib/inputs/image_topic.hpp" namespace Input { @@ -34,4 +34,4 @@ typedef ImageTopic RealSenseCameraTopic; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp index 6a2ac311..f9d4a29a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ * @file ros_handler.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ #include #include @@ -28,7 +28,7 @@ namespace Input class Ros2Handler { public: - void setHandler(const std::shared_ptr & node) + void setHandler(const std::shared_ptr& node) { node_ = node; } @@ -43,14 +43,14 @@ class Ros2Handler inline void setHeader(std::string frame_id) { header_.frame_id = frame_id; - #if true //directly use RCLCPP api for time stamp generation. +#if true // directly use RCLCPP api for time stamp generation. header_.stamp = rclcpp::Clock(RCL_ROS_TIME).now(); - #else +#else std::chrono::high_resolution_clock::time_point tp = std::chrono::high_resolution_clock::now(); int64 ns = tp.time_since_epoch().count(); header_.stamp.sec = ns / 1000000000; header_.stamp.nanosec = ns % 1000000000; - #endif +#endif } inline void setHeader(std_msgs::msg::Header header) @@ -76,6 +76,7 @@ class Ros2Handler { return locked_header_; } + private: std::shared_ptr node_; std_msgs::msg::Header header_; @@ -84,4 +85,4 @@ class Ros2Handler } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp index ef63e65b..5e595c1c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
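Ros2Handler (above) is what ties an input device to a ROS 2 node: setHandler() stores the node and setHeader() stamps outgoing frames with RCL_ROS_TIME, as the #if branch shows. A hedged wiring sketch using the ImageTopic input from earlier in this patch (node and frame names are illustrative):

```cpp
#include <memory>
#include "rclcpp/rclcpp.hpp"
#include "openvino_wrapper_lib/inputs/image_topic.hpp"

// Hedged sketch: attaching a node and a frame_id to a topic-based input.
int main(int argc, char** argv)
{
  rclcpp::init(argc, argv);
  auto node = rclcpp::Node::make_shared("openvino_input");
  auto input = std::make_shared<Input::ImageTopic>(node);
  input->setHandler(node);           // from Ros2Handler
  input->setHeader("camera_frame");  // stamped via rclcpp::Clock(RCL_ROS_TIME)
  input->initialize();
  rclcpp::spin(node);
  rclcpp::shutdown();
  return 0;
}
```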
@@ -17,20 +17,18 @@ * @file standard_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ #include - -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" #include #include #include #include #include - namespace Input { /** @@ -40,6 +38,7 @@ namespace Input class StandardCamera : public BaseInputDevice { public: + StandardCamera(const std::string& camera = ""); /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -56,12 +55,13 @@ class StandardCamera : public BaseInputDevice * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: int getCameraId(); cv::VideoCapture cap; int camera_id_ = -1; + std::string device_path_; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp similarity index 81% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp index e02a5f16..9b6ef5bc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ * @brief A header file with declaration for Video class * @file video_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -32,7 +32,7 @@ namespace Input class Video : public BaseInputDevice { public: - explicit Video(const std::string &); + explicit Video(const std::string&); /** * @brief Read a video file from the file path. * @param[in] An video file path. @@ -49,7 +49,7 @@ class Video : public BaseInputDevice * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. 
*/ - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: cv::VideoCapture cap; @@ -57,4 +57,4 @@ class Video : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp similarity index 81% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp index 7ef53bfd..564f93a3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ * @file age_gender_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -61,9 +61,8 @@ class AgeGenderDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - + bool updateLayerProperty(std::shared_ptr&) override; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp new file mode 100644 index 00000000..b7d04700 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp @@ -0,0 +1,314 @@ +// Copyright (c) 2018-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for ModelAttribute class. 
+ * @file base_attribute.hpp + */ + +#ifndef OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ + +#include +#include +#include +#include +#include + +#include "openvino/openvino.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +namespace Models +{ +/** + * @class ModelAttribute + * @brief This class represents the network given by .xml and .bin file + */ +class ModelAttribute +{ +public: + using Ptr = std::shared_ptr; + const char* DefaultInputName{ "input0" }; + const char* DefaultOutputName = "output0"; + struct ModelAttr + { + // Input Tensor Size + int input_height = 0; + int input_width = 0; + + // Input/Output Tensor Info + int input_tensor_count = 1; // The number of input tensors + int output_tensor_count = 1; // The number of output tensors + bool has_confidence_output = true; // YOLOv5~7 output a separate confidence float, while YOLOv8 doesn't. + bool need_transpose = false; // Whether the output tensor needs a transpose + int max_proposal_count = 0; // The max number of objects in the inference output tensor + int object_size = 0; // The size of each object in the inference output tensor + std::map input_names; + std::map output_names; + + std::string model_name; + std::vector labels; + }; + + ModelAttribute(const std::string model_name) + { + attr_.model_name = model_name; + } + + inline bool isVerified() + { + return (attr_.max_proposal_count > 0 && attr_.object_size > 0 && attr_.input_height > 0 && attr_.input_width > 0 && + !attr_.input_names.empty() && !attr_.output_names.empty()); + } + inline void printAttribute() + { + slog::info << "-------------------- Attributes for Model ----------------------" << slog::endl; + slog::info << "| model_name: " << attr_.model_name << slog::endl; + slog::info << "| max_proposal_count: " << attr_.max_proposal_count << slog::endl; + slog::info << "| object_size: " << attr_.object_size << slog::endl; + slog::info << "| input_height: " << attr_.input_height << slog::endl; + slog::info << "| input_width: " << attr_.input_width << slog::endl; + slog::info << "| input_tensor_count: " << attr_.input_tensor_count << slog::endl; + slog::info << "| output_tensor_count: " << attr_.output_tensor_count << slog::endl; + slog::info << "| need_transpose (max_proposal_count < object_size): " << std::boolalpha << attr_.need_transpose + << slog::endl; + slog::info << "| has_confidence_output: " << std::boolalpha << attr_.has_confidence_output << slog::endl; + + slog::info << "| input_names: " << slog::endl; + for (auto& item : attr_.input_names) { + slog::info << "| " << item.first << "-->" << item.second << slog::endl; + } + slog::info << "| output_names: " << slog::endl; + for (auto& item : attr_.output_names) { + slog::info << "| " << item.first << "-->" << item.second << slog::endl; + } + + slog::info << "| labels:" << slog::endl; + for (size_t i = 0; i < attr_.labels.size(); i++) { + if (i % 8 == 0) + slog::info << "| "; + slog::info << "[" << i << ":" << attr_.labels[i] << "]"; + if (i % 8 == 7) + slog::info << slog::endl; + } + slog::info << slog::endl; + + if (attr_.max_proposal_count <= 0 || attr_.object_size <= 0 || attr_.input_height <= 0 || attr_.input_width <= 0 || + attr_.input_names.empty() || attr_.output_names.empty()) { + slog::info << "--------" << slog::endl; + slog::warn << "Not all attributes are set correctly! Zero or empty values" + << " are not allowed in the above list."
<< slog::endl; + } + if (attr_.input_tensor_count != static_cast(attr_.input_names.size())) { + slog::info << "--------" << slog::endl; + slog::warn << "The count of input_tensor(s) is not aligned with input names!" << slog::endl; + } + if (attr_.output_tensor_count != static_cast(attr_.output_names.size())) { + slog::info << "--------" << slog::endl; + slog::warn << "The count of output_tensor(s) is not aligned with output names!" << slog::endl; + } + slog::info << "-------------------- Attributes for Model ----------------------" << slog::endl; + } + + virtual bool updateLayerProperty(const std::shared_ptr&) + { + return false; + } + + inline std::string getModelName() const + { + return attr_.model_name; + } + + inline void setModelName(std::string name) + { + attr_.model_name = name; + } + + inline std::string getInputName(std::string name = "input0") const + { + auto it = attr_.input_names.find(name); + if (it == attr_.input_names.end()) { + slog::warn << "No input named: " << name << slog::endl; + return std::string(""); + } + + return it->second; + } + + inline std::string getOutputName(std::string name = "output0") const + { + auto it = attr_.output_names.find(name); + if (it == attr_.output_names.end()) { + slog::warn << "No output named: " << name << slog::endl; + return std::string(""); + } + + return it->second; + } + + inline int getMaxProposalCount() const + { + return attr_.max_proposal_count; + } + + inline int getObjectSize() const + { + return attr_.object_size; + } + + inline void loadLabelsFromFile(const std::string file_path) + { + std::ifstream input_file(file_path); + for (std::string name; std::getline(input_file, name);) { + attr_.labels.push_back(name); + } + } + + inline std::vector& getLabels() + { + return attr_.labels; + } + + inline void addInputInfo(std::string key, std::string value) + { + attr_.input_names[key] = value; + } + + inline const std::string getInputInfo(std::string key) + { + return attr_.input_names[key]; + } + + inline void addOutputInfo(std::string key, std::string value) + { + attr_.output_names[key] = value; + } + + inline void setInputHeight(const int height) + { + attr_.input_height = height; + } + + inline int getInputHeight() const + { + return attr_.input_height; + } + + inline void setInputWidth(const int width) + { + attr_.input_width = width; + } + + inline int getInputWidth() const + { + return attr_.input_width; + } + + inline void setMaxProposalCount(const int max) + { + attr_.max_proposal_count = max; + } + + inline void setObjectSize(const int size) + { + attr_.object_size = size; + } + + inline void setHasConfidenceOutput(const bool has) + { + attr_.has_confidence_output = has; + } + + inline bool hasConfidenceOutput() const + { + return attr_.has_confidence_output; + } + + inline void setCountOfInputs(const int count) + { + attr_.input_tensor_count = count; + } + + inline int getCountOfInputs() const + { + return attr_.input_tensor_count; + } + + inline void setCountOfOutputs(const int count) + { + attr_.output_tensor_count = count; + } + + inline int getCountOfOutputs() const + { + return attr_.output_tensor_count; + } + + inline void setTranspose(bool trans) + { + attr_.need_transpose = trans; + } + + inline bool needTranspose() const + { + return attr_.need_transpose; + } + + inline bool _renameMapKeyByValue(std::map& map, const std::string& value, + const std::string& new_key) + { + for (auto& item : map) { + auto n = item.second.find(value); + if (std::string::npos != n) { + // if(item.second.contains(value)){ + auto nh 
= map.extract(item.first); + nh.key() = new_key; + map.insert(std::move(nh)); + return true; + } + } + + return false; + } + + inline bool retagOutputByValue(const std::string& value, const std::string& new_tag) + { + return _renameMapKeyByValue(attr_.output_names, value, new_tag); + } + + inline bool retagInputByValue(const std::string& value, const std::string& new_tag) + { + return _renameMapKeyByValue(attr_.input_names, value, new_tag); + } + +protected: + ModelAttr attr_; + std::string input_tensor_name_; + std::string output_tensor_name_; + std::vector> inputs_info_; + std::vector> outputs_info_; +}; + +class SSDModelAttr : public ModelAttribute +{ +public: + explicit SSDModelAttr(const std::string model_name = "SSDNet-like"); + + bool updateLayerProperty(const std::shared_ptr&); +}; + +} // namespace Models + +#endif // OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp new file mode 100644 index 00000000..f04efd1d --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp @@ -0,0 +1,207 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for BaseModel Class + * @file base_model.hpp + */ + +#ifndef OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" + +namespace Engines +{ +class Engine; +} + +namespace openvino_wrapper_lib +{ +class ObjectDetectionResult; +} + +namespace Models +{ +/** + * @class BaseModel + * @brief This class represents the network given by .xml and .bin file + */ +class BaseModel : public ModelAttribute +{ +public: + using Ptr = std::shared_ptr; + /** + * @brief Initialize the class with the given .xml, .bin and .labels files. It will + * also check whether the numbers of inputs and outputs fit. + * @param[in] label_loc The location of the model's .labels file. + * @param[in] model_loc The location of the model's .xml file + * (the model's .bin file should share its name, differing only in extension). + * @param[in] batch_size The batch size (default: 1) the network should have. + */ + BaseModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + + /** + * @brief Initialize the class with given external parameters. It requires certain items + * to be set, such as: [label path, model path, batch size] + * @param[in] config the configuration structure to be set to the model class.
+ */ + BaseModel(const Params::ParamManager::InferenceRawData& config); + + /** + * @brief Get the maximum batch size of the model. + * @return The maximum batch size of the model. + */ + inline int getMaxBatchSize() const + { + return max_batch_size_; + } + inline void setMaxBatchSize(int max_batch_size) + { + max_batch_size_ = max_batch_size; + } + + virtual bool enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) + { + return true; + } + /** + * @brief Initialize the model. During the process the class will check + * the network input, output size, check layer property and + * set layer property. + */ + void modelInit(); + /** + * @brief Get the category name of the model. + * @return The category name of the model. + */ + virtual const std::string getModelCategory() const = 0; + inline ModelAttr getAttribute() + { + return attr_; + } + + inline std::shared_ptr getModel() const + { + return model_; + } + +protected: + /** + * New interface to check and update layer properties + * @brief Set the layer property (layer layout, layer precision, etc.). + * @param[in] network_reader The reader of the network to be set. + */ + virtual bool updateLayerProperty(std::shared_ptr& network_reader) = 0; + + virtual bool matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index, + const std::shared_ptr& engine); + + cv::Mat extendFrameToInputRatio(const cv::Mat); + ov::Core engine; + std::shared_ptr model_; + void setFrameSize(const int& w, const int& h) + { + frame_size_.width = w; + frame_size_.height = h; + } + cv::Size getFrameSize() + { + return frame_size_; + } + + inline void setFrameResizeeRatioWidth(const float r) + { + frame_resize_ratio_width_ = r; + } + + inline void setFrameResizeeRatioHeight(const float r) + { + frame_resize_ratio_height_ = r; + } + + inline float getFrameResizeRatioWidth() const + { + return frame_resize_ratio_width_; + } + + inline float getFrameResizeRatioHeight() const + { + return frame_resize_ratio_height_; + } + + inline void setKeepInputShapeRatio(bool keep) + { + keep_input_shape_ratio_ = keep; + } + + inline bool isKeepInputRatio() const + { + return keep_input_shape_ratio_; + } + + inline void setExpectedFrameSize(cv::Size expect) + { + expected_frame_size_ = expect; + } + + inline cv::Size getExpectedFrameSize() const + { + return expected_frame_size_; + } + +protected: + // config_ archives the model configuration from the external YAML file + Params::ParamManager::InferenceRawData config_; + +private: + int max_batch_size_; + std::string model_loc_; + std::string label_loc_; + + // Information about Input Data + cv::Size frame_size_; + cv::Size expected_frame_size_{ 224, 224 }; + float frame_resize_ratio_width_ = 1.0; + float frame_resize_ratio_height_ = 1.0; + bool keep_input_shape_ratio_ = false; +}; + +class ObjectDetectionModel : public BaseModel +{ +public: + ObjectDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + virtual bool fetchResults(const std::shared_ptr& engine, + std::vector& result, + const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false) = 0; +}; + +} // namespace Models + +#endif // OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp rename
to openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp index de5d4dfb..98d48016 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ * @file emotion_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -39,12 +39,8 @@ class EmotionDetectionModel : public BaseModel * @return Name of the model. */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - -private: - bool verifyOutputLayer(const InferenceEngine::DataPtr & ptr); - + bool updateLayerProperty(std::shared_ptr&) override; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp similarity index 72% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp index 11c7efae..0b1b2307 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -32,8 +32,8 @@ namespace Models class FaceDetectionModel : public ObjectDetectionModel { public: - FaceDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + FaceDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + // void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; /** * @brief Get the name of this detection model. * @return Name of the model. 
@@ -42,4 +42,4 @@ class FaceDetectionModel : public ObjectDetectionModel }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp similarity index 65% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp index 1939cf05..caf10b55 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,11 @@ * @brief A header file with declaration for FaceReidentificationModel Class * @file person_reidentification_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" + namespace Models { /** @@ -29,9 +30,15 @@ namespace Models class FaceReidentificationModel : public BaseModel { public: - FaceReidentificationModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + FaceReidentificationModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. 
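The _renameMapKeyByValue() helper in base_attribute.hpp earlier in this patch renames a map entry without copying its value: it extracts the node, rewrites the key, and reinserts it. The same C++17 idiom in isolation (standalone sketch, not code from this PR):

```cpp
#include <cassert>
#include <map>
#include <string>

// C++17 node-handle idiom: rename a key in place, keeping the mapped value.
int main()
{
  std::map<std::string, std::string> names{ { "output0", "boxes" } };
  auto nh = names.extract("output0");  // detaches the node from the map
  nh.key() = "detection";              // keys are immutable inside the map, mutable here
  names.insert(std::move(nh));
  assert(names.count("detection") == 1);
  return 0;
}
```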
@@ -39,10 +46,8 @@ class FaceReidentificationModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp index 5afce9b3..663356d8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ * @file head_pose_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -32,7 +32,7 @@ namespace Models class HeadPoseDetectionModel : public BaseModel { public: - HeadPoseDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); + HeadPoseDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); /** * @brief Get the output angle roll. @@ -63,8 +63,7 @@ class HeadPoseDetectionModel : public BaseModel * @return Name of the model. */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - + bool updateLayerProperty(std::shared_ptr&) override; private: std::string output_angle_r_ = "angle_r_fc"; @@ -73,4 +72,4 @@ class HeadPoseDetectionModel : public BaseModel }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp similarity index 65% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp index 7bbb51e5..3b38385a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief A header file with declaration for LandmarksDetectionModel Class * @file landmarks_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -29,9 +29,15 @@ namespace Models class LandmarksDetectionModel : public BaseModel { public: - LandmarksDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + LandmarksDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. @@ -39,10 +45,8 @@ class LandmarksDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp similarity index 58% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp index 9357160a..7a0084a7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief A header file with declaration for LicensePlateDetectionModel Class * @file vehicle_attribs_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -29,11 +29,23 @@ namespace Models class LicensePlateDetectionModel : public BaseModel { public: - LicensePlateDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getSeqInputName() {return seq_input_;} - inline const std::string getOutputName() {return output_;} - inline int getMaxSequenceSize() const {return max_sequence_size_;} + LicensePlateDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getSeqInputName() + { + return seq_input_; + } + inline const std::string getOutputName() + { + return output_; + } + inline int getMaxSequenceSize() const + { + return max_sequence_size_; + } /** * @brief Get the name of this detection model. * @return Name of the model. @@ -41,14 +53,11 @@ class LicensePlateDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - // up to 88 items per license plate, ended with "-1" + bool updateLayerProperty(std::shared_ptr&) override; const int max_sequence_size_ = 88; std::string input_; std::string seq_input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp similarity index 52% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp index 76bb6354..ebe88b1b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
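A recurring change in this patch is the updateLayerProperty signature: the InferenceEngine::CNNNetwork reader is replaced by std::shared_ptr<ov::Model> from the OpenVINO 2.0 API. A hedged sketch of what such an override typically reads off the model (a single static NCHW input is an assumption; real overrides differ per model):

```cpp
#include <memory>
#include "openvino/openvino.hpp"

// Hedged sketch of an OpenVINO 2.0 updateLayerProperty-style override.
// Assumes exactly one input with a static NCHW shape.
bool update_layer_property(std::shared_ptr<ov::Model>& model)
{
  ov::Output<ov::Node> input = model->input();  // throws if the input count != 1
  const ov::Shape shape = input.get_shape();    // e.g. {1, 3, H, W}
  if (shape.size() != 4) {
    return false;
  }
  const int height = static_cast<int>(shape[2]);
  const int width = static_cast<int>(shape[3]);
  // A real override would forward these into ModelAttribute, e.g.
  // setInputHeight(height), setInputWidth(width),
  // addInputInfo("input0", input.get_any_name()).
  return height > 0 && width > 0;
}
```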
@@ -15,12 +15,12 @@
  * @brief A header file with declaration for ObjectDetectionModel Class
  * @file face_detection_model.h
  */
-#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
 #include <string>
 #include <vector>
 #include <memory>
-#include "dynamic_vino_lib/models/base_model.hpp"
+#include "openvino_wrapper_lib/models/base_model.hpp"
 namespace Models
 {
 /**
@@ -29,25 +29,20 @@ namespace Models
  */
 class ObjectDetectionSSDModel : public ObjectDetectionModel
 {
-  using Result = dynamic_vino_lib::ObjectDetectionResult;
+  using Result = openvino_wrapper_lib::ObjectDetectionResult;
 
 public:
-  ObjectDetectionSSDModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
+  ObjectDetectionSSDModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
 
-  bool fetchResults(
-    const std::shared_ptr<Engines::Engine> & engine,
-    std::vector<Result> & results,
-    const float & confidence_thresh = 0.3,
-    const bool & enable_roi_constraint = false) override;
+  bool fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                    std::vector<Result>& results,
+                    const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false) override;
 
-  bool enqueue(
-    const std::shared_ptr<Engines::Engine> & engine,
-    const cv::Mat & frame,
-    const cv::Rect & input_frame_loc) override;
+  bool enqueue(const std::shared_ptr<Engines::Engine>& engine, const cv::Mat& frame,
+               const cv::Rect& input_frame_loc) override;
 
-  bool matToBlob(
-    const cv::Mat & orig_image, const cv::Rect &, float scale_factor,
-    int batch_index, const std::shared_ptr<Engines::Engine> & engine) override;
+  bool matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index,
+                 const std::shared_ptr<Engines::Engine>& engine) override;
 
   /**
    * @brief Get the name of this detection model.
@@ -55,8 +50,7 @@ class ObjectDetectionSSDModel : public ObjectDetectionModel
    */
   const std::string getModelCategory() const override;
 
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
-
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
 };
 }  // namespace Models
-#endif  // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp
new file mode 100644
index 00000000..603c5f72
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/**
+ * @brief A header file with declaration for ObjectDetectionYolov5Model Class
+ * @file object_detection_yolov5_model.hpp
+ */
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_
+#include <string>
+#include <vector>
+#include <memory>
+#include "openvino_wrapper_lib/models/base_model.hpp"
+namespace Models
+{
+/**
+ * @class ObjectDetectionYolov5Model
+ * @brief This class generates the YOLOv5 object detection model.
+ */
+#pragma pack(1)
+typedef struct Resize
+{
+  cv::Mat resized_image;
+  int dw{};
+  int dh{};
+} Resize_t;
+#pragma pack()
+
+class ObjectDetectionYolov5Model : public ObjectDetectionModel
+{
+  using Result = openvino_wrapper_lib::ObjectDetectionResult;
+
+public:
+  ObjectDetectionYolov5Model(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+
+  bool fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                    std::vector<Result>& results,
+                    const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false) override;
+
+  bool enqueue(const std::shared_ptr<Engines::Engine>& engine, const cv::Mat& frame,
+               const cv::Rect& input_frame_loc) override;
+
+  /**
+   * @brief Get the name of this detection model.
+   * @return Name of the model.
+   */
+  const std::string getModelCategory() const override;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
+};
+}  // namespace Models
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov8_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov8_model.hpp
new file mode 100644
index 00000000..08acc38c
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov8_model.hpp
@@ -0,0 +1,33 @@
+// Copyright (c) 2023 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
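The Resize_t struct declared in the YOLOv5 header above (resized_image plus dw/dh offsets) is the usual shape of letterbox preprocessing: scale the frame with its aspect ratio preserved, pad the remainder, and keep the padding so detections can be mapped back to the original image. A hedged sketch of such a helper, assuming a square network input of 640 and the gray padding value commonly used with YOLOv5; the actual implementation in this PR may differ:

#include <algorithm>
#include <opencv2/opencv.hpp>
#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp"

// Illustrative letterbox matching the Resize_t fields above: scale the longer
// side to `size`, pad the rest, and record the padding in dw/dh.
Models::Resize_t letterbox(const cv::Mat& img, int size = 640)
{
  const float scale = std::min(size / static_cast<float>(img.cols),
                               size / static_cast<float>(img.rows));
  const int new_w = static_cast<int>(img.cols * scale);
  const int new_h = static_cast<int>(img.rows * scale);

  Models::Resize_t out;
  out.dw = size - new_w;  // horizontal padding, undone when mapping boxes back
  out.dh = size - new_h;  // vertical padding
  cv::Mat resized;
  cv::resize(img, resized, cv::Size(new_w, new_h));
  cv::copyMakeBorder(resized, out.resized_image, 0, out.dh, 0, out.dw,
                     cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
  return out;
}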
+
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV8_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV8_MODEL_HPP_
+#include <string>
+#include <vector>
+#include <memory>
+#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp"
+
+namespace Models
+{
+
+class ObjectDetectionYolov8Model : public ObjectDetectionYolov5Model
+{
+  using Result = openvino_wrapper_lib::ObjectDetectionResult;
+
+public:
+  explicit ObjectDetectionYolov8Model(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+};
+}  // namespace Models
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV8_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_maskrcnn_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_maskrcnn_model.hpp
new file mode 100644
index 00000000..1e74f629
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_maskrcnn_model.hpp
@@ -0,0 +1,42 @@
+// Copyright (c) 2023 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MASKRCNN_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MASKRCNN_MODEL_HPP_
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/base_model.hpp"
+
+namespace Models
+{
+
+/**
+ * @class ObjectSegmentationInstanceMaskrcnnModel
+ * @brief This class generates the object segmentation model.
+ */
+class ObjectSegmentationInstanceMaskrcnnModel : public ObjectSegmentationInstanceModel
+{
+  using Result = openvino_wrapper_lib::ObjectSegmentationInstanceResult;
+
+public:
+  ObjectSegmentationInstanceMaskrcnnModel(const std::string& label_loc, const std::string& model_loc,
+                                          int batch_size = 1);
+
+  bool fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                    std::vector<Result>& results,
+                    const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false);
+
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
+};
+}  // namespace Models
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MASKRCNN_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_model.hpp
new file mode 100644
index 00000000..5f333912
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_instance_model.hpp
@@ -0,0 +1,57 @@
+// Copyright (c) 2023 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MODEL_HPP_
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/base_model.hpp"
+
+namespace openvino_wrapper_lib
+{
+class ObjectSegmentationInstanceResult;
+}
+
+namespace Models
+{
+
+/**
+ * @class ObjectSegmentationInstanceModel
+ * @brief This class generates the object segmentation model.
+ */
+class ObjectSegmentationInstanceModel : public BaseModel
+{
+  using Result = openvino_wrapper_lib::ObjectSegmentationInstanceResult;
+
+public:
+  ObjectSegmentationInstanceModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+  ObjectSegmentationInstanceModel(const Params::ParamManager::InferenceRawData& config);
+
+  virtual bool fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                            std::vector<Result>& results,
+                            const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false);
+
+  bool enqueue(const std::shared_ptr<Engines::Engine>&, const cv::Mat&, const cv::Rect&) override;
+
+  /**
+   * @brief Get the name of this segmentation model.
+   * @return Name of the model.
+   */
+  const std::string getModelCategory() const override;
+  virtual bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
+
+private:
+  void setDefaultConfig();
+};
+}  // namespace Models
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_INSTANCE_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp
new file mode 100644
index 00000000..c16497a1
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp
@@ -0,0 +1,58 @@
+// Copyright (c) 2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/**
+ * @brief A header file with declaration for ObjectSegmentationMaskrcnnModel Class
+ * @file object_segmentation_maskrcnn_model.hpp
+ */
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/base_model.hpp"
+namespace Models
+{
+/**
+ * @class ObjectSegmentationMaskrcnnModel
+ * @brief This class generates the object segmentation model.
+ */
+class ObjectSegmentationMaskrcnnModel : public BaseModel
+{
+public:
+  ObjectSegmentationMaskrcnnModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+  inline int getMaxProposalCount() const
+  {
+    return max_proposal_count_;
+  }
+  inline int getObjectSize() const
+  {
+    return object_size_;
+  }
+
+  bool enqueue(const std::shared_ptr<Engines::Engine>&, const cv::Mat&, const cv::Rect&) override;
+
+  bool matToBlob(const cv::Mat&, const cv::Rect&, float, int, const std::shared_ptr<Engines::Engine>&);
+
+  /**
+   * @brief Get the name of this segmentation model.
+   * @return Name of the model.
+   */
+  const std::string getModelCategory() const override;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
+
+private:
+  int max_proposal_count_;
+  int object_size_;
+};
+}  // namespace Models
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp
similarity index 66%
rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp
index af047bcc..92af6678 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,10 +15,11 @@
  * @brief A header file with declaration for ObjectSegmentationModel Class
  * @file face_detection_model.h
  */
-#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
 #include <string>
-#include "dynamic_vino_lib/models/base_model.hpp"
+#include <openvino/openvino.hpp>
+#include "openvino_wrapper_lib/models/base_model.hpp"
 namespace Models
 {
 /**
@@ -28,7 +29,7 @@ namespace Models
 class ObjectSegmentationModel : public BaseModel
 {
 public:
-  ObjectSegmentationModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
+  ObjectSegmentationModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
   inline int getMaxProposalCount() const
   {
     return max_proposal_count_;
@@ -38,25 +39,20 @@ class ObjectSegmentationModel : public BaseModel
     return object_size_;
   }
 
-  bool enqueue(const std::shared_ptr<Engines::Engine> & ,const cv::Mat &,
-    const cv::Rect & ) override;
+  bool enqueue(const std::shared_ptr<Engines::Engine>&, const cv::Mat&, const cv::Rect&) override;
 
-  bool matToBlob(
-    const cv::Mat & , const cv::Rect &, float ,
-    int , const std::shared_ptr<Engines::Engine> & );
+  bool matToBlob(const cv::Mat&, const cv::Rect&, float, int, const std::shared_ptr<Engines::Engine>&);
 
   /**
    * @brief Get the name of this segmentation model.
    * @return Name of the model.
   */
   const std::string getModelCategory() const override;
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
 
 private:
   int max_proposal_count_;
   int object_size_;
-
-  InferenceEngine::InputsDataMap input_info_;
 };
 }  // namespace Models
-#endif  // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp
similarity index 63%
rename from dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp
index d05e67a6..236798f5 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@
  * @brief A header file with declaration for PersonAttribsDetectionModel Class
  * @file person_attribs_detection_model.h
  */
-#ifndef DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
 #include <string>
-#include "dynamic_vino_lib/models/base_model.hpp"
+#include "openvino_wrapper_lib/models/base_model.hpp"
 namespace Models
 {
 /**
@@ -29,9 +29,7 @@ namespace Models
 class PersonAttribsDetectionModel : public BaseModel
 {
 public:
-  PersonAttribsDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
-  //inline const std::string getInputName() {return input_;}
-  //inline const std::string getOutputName() {return output_;}
+  PersonAttribsDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
   /**
    * @brief Get the name of this detection model.
    * @return Name of the model.
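Every model header in this PR swaps updateLayerProperty(InferenceEngine::CNNNetwork&) for updateLayerProperty(std::shared_ptr<ov::Model>&), the OpenVINO 2.0 model representation. A minimal sketch of what such an implementation typically does under the new API, using OpenVINO's PrePostProcessor; this is illustrative and not the toolkit's actual implementation (the member names input_/output_ mirror the headers above):

#include <memory>
#include <string>
#include <openvino/openvino.hpp>

// OpenVINO 2.0-style layer setup: read tensor names from the ov::Model and
// bake preprocessing into the model, instead of mutating a CNNNetwork.
bool updateLayerPropertySketch(std::shared_ptr<ov::Model>& model,
                               std::string& input_, std::string& output_)
{
  input_ = model->input().get_any_name();
  output_ = model->output().get_any_name();

  ov::preprocess::PrePostProcessor ppp(model);
  ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC");
  ppp.input().model().set_layout("NCHW");
  model = ppp.build();  // rebuild the model with preprocessing attached
  return true;
}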
@@ -39,11 +37,9 @@ class PersonAttribsDetectionModel : public BaseModel
   const std::string getModelCategory() const override;
 
 protected:
-  //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override;
-  //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override;
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
   std::string input_;
   std::string output_;
 };
 }  // namespace Models
-#endif  // DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp
similarity index 63%
rename from dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp
index 41ff85c7..83683339 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@
  * @brief A header file with declaration for PersonReidentificationModel Class
  * @file person_reidentification_model.h
 */
-#ifndef DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
 #include <string>
-#include "dynamic_vino_lib/models/base_model.hpp"
+#include "openvino_wrapper_lib/models/base_model.hpp"
 namespace Models
 {
 /**
@@ -29,9 +29,15 @@ namespace Models
 class PersonReidentificationModel : public BaseModel
 {
 public:
-  PersonReidentificationModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
-  inline const std::string getInputName() {return input_;}
-  inline const std::string getOutputName() {return output_;}
+  PersonReidentificationModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+  inline const std::string getInputName()
+  {
+    return input_;
+  }
+  inline const std::string getOutputName()
+  {
+    return output_;
+  }
   /**
    * @brief Get the name of this detection model.
    * @return Name of the model.
@@ -39,11 +45,9 @@ class PersonReidentificationModel : public BaseModel
   const std::string getModelCategory() const override;
 
 protected:
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
-  //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override;
-  //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
   std::string input_;
   std::string output_;
 };
 }  // namespace Models
-#endif  // DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp
similarity index 60%
rename from dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp
index 9ed5acdc..486a21b1 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@
  * @brief A header file with declaration for VehicleAttribsDetectionModel Class
  * @file vehicle_attribs_detection_model.h
 */
-#ifndef DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
-#define DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
+#define OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
 #include <string>
-#include "dynamic_vino_lib/models/base_model.hpp"
+#include "openvino_wrapper_lib/models/base_model.hpp"
 namespace Models
 {
 /**
@@ -29,10 +29,19 @@ namespace Models
 class VehicleAttribsDetectionModel : public BaseModel
 {
 public:
-  VehicleAttribsDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1);
-  inline const std::string getInputName() {return input_;}
-  inline const std::string getColorOutputName() {return color_output_;}
-  inline const std::string getTypeOutputName() {return type_output_;}
+  VehicleAttribsDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1);
+  inline const std::string getInputName()
+  {
+    return input_tensor_name_;
+  }
+  inline const std::string getColorOutputName()
+  {
+    return color_output_;
+  }
+  inline const std::string getTypeOutputName()
+  {
+    return type_output_;
+  }
   /**
    * @brief Get the name of this detection model.
    * @return Name of the model.
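VehicleAttribsDetectionModel above exposes two output tensor names, one scoring colors and one scoring types. A hedged sketch of consuming both from an ov::InferRequest under the OpenVINO 2.0 API; the helper names are illustrative, and the label tables are assumed to come from the model's documentation:

#include <algorithm>
#include <string>
#include <openvino/openvino.hpp>

// Index of the highest-scoring class in a 1-D float tensor.
int argmaxIndex(const ov::Tensor& t)
{
  const float* data = t.data<float>();
  return static_cast<int>(std::max_element(data, data + t.get_size()) - data);
}

// Read both heads of a two-output attributes model; the output names would be
// the strings returned by getColorOutputName()/getTypeOutputName() above.
void readVehicleAttribs(ov::InferRequest& request,
                        const std::string& color_output, const std::string& type_output)
{
  const int color_id = argmaxIndex(request.get_tensor(color_output));
  const int type_id = argmaxIndex(request.get_tensor(type_output));
  (void)color_id;  // indexes a color label table (e.g. white, gray, ...)
  (void)type_id;   // indexes a type label table (e.g. car, bus, ...)
}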
@@ -40,12 +49,9 @@ class VehicleAttribsDetectionModel : public BaseModel
   const std::string getModelCategory() const override;
 
 protected:
-  //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override;
-  //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override;
-  bool updateLayerProperty(InferenceEngine::CNNNetwork&) override;
-  std::string input_;
+  bool updateLayerProperty(std::shared_ptr<ov::Model>&) override;
   std::string color_output_;
   std::string type_output_;
 };
 }  // namespace Models
-#endif  // DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp
new file mode 100644
index 00000000..5672f1ce
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp
@@ -0,0 +1,200 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief A header file with declaration for BaseOutput Class
+ * @file base_output.hpp
+ */
+
+#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_
+#define OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp"
+#include "openvino_wrapper_lib/inferences/base_inference.hpp"
+#include "openvino_wrapper_lib/inferences/emotions_detection.hpp"
+#include "openvino_wrapper_lib/inferences/face_detection.hpp"
+#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp"
+#include "openvino_wrapper_lib/inferences/object_detection.hpp"
+#include "openvino_wrapper_lib/inferences/object_segmentation.hpp"
+#include "openvino_wrapper_lib/inferences/person_reidentification.hpp"
+#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp"
+#include "openvino_wrapper_lib/inferences/landmarks_detection.hpp"
+#include "openvino_wrapper_lib/inferences/face_reidentification.hpp"
+#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp"
+#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp"
+#include "openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp"
+#include "openvino_wrapper_lib/inferences/object_segmentation_instance.hpp"
+#include "opencv2/opencv.hpp"
+
+class Pipeline;
+namespace Outputs
+{
+/**
+ * @class BaseOutput
+ * @brief This class is a base class for various output devices. It employs
+ * the visitor pattern to perform different operations on different inference
+ * results for different output devices.
+ */
+class BaseOutput
+{
+public:
+  explicit BaseOutput(std::string output_name) : output_name_(output_name)
+  {
+  }
+  /**
+   * @brief Generate output content according to the license plate detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::LicensePlateDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the vehicle attributes detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::VehicleAttribsDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the face reidentification result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::FaceReidentificationResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the landmarks detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::LandmarksDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the person attributes detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::PersonAttribsDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the person reidentification result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::PersonReidentificationResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the object segmentation result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the object segmentation maskrcnn result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the object segmentation result for instance models.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the object detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::ObjectDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the face detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::FaceDetectionResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the emotion detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::EmotionsResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the age and gender detection
+   * result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::AgeGenderResult>&)
+  {
+  }
+  /**
+   * @brief Generate output content according to the headpose detection result.
+   */
+  virtual void accept(const std::vector<openvino_wrapper_lib::HeadPoseResult>&)
+  {
+  }
+  /**
+   * @brief Calculate the camera matrix of a frame for image window output, no
+   * implementation for ros topic output.
+   */
+  virtual void feedFrame(const cv::Mat&)
+  {
+  }
+  /**
+   * @brief Show all the contents generated by the accept functions.
+   */
+  virtual void handleOutput() = 0;
+
+  void setPipeline(Pipeline* const pipeline);
+  virtual void setServiceResponse(std::shared_ptr response)
+  {
+  }
+  virtual void setServiceResponseForFace(std::shared_ptr response)
+  {
+  }
+  virtual void setServiceResponse(std::shared_ptr response)
+  {
+  }
+  virtual void setServiceResponse(std::shared_ptr response)
+  {
+  }
+  virtual void setServiceResponse(std::shared_ptr response)
+  {
+  }
+  virtual void setServiceResponse(std::shared_ptr response)
+  {
+  }
+  Pipeline* getPipeline() const;
+  cv::Mat getFrame() const;
+  virtual void clearData()
+  {
+  }
+
+protected:
+  cv::Mat frame_;
+  Pipeline* pipeline_;
+  std::string output_name_;
+};
+}  // namespace Outputs
+#endif  // OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp
similarity index 51%
rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp
index e34950af..0cf4b191 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -13,16 +13,16 @@
 // limitations under the License.
 
 /**
-* @brief A header file with declaration for ImageWindowOutput Class
-* @file image_window_output.h
-*/
+ * @brief A header file with declaration for ImageWindowOutput Class
+ * @file image_window_output.h
+ */
 
-#ifndef DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
-#define DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
+#define OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
 
 #include <string>
 #include <vector>
-#include "dynamic_vino_lib/outputs/base_output.hpp"
+#include "openvino_wrapper_lib/outputs/base_output.hpp"
 
 namespace Outputs
 {
@@ -33,14 +33,14 @@ namespace Outputs
 class ImageWindowOutput : public BaseOutput
 {
 public:
-  explicit ImageWindowOutput(const std::string & output_name, int focal_length = 950);
+  explicit ImageWindowOutput(const std::string& output_name, int focal_length = 950);
   /**
    * @brief Calculate the camera matrix of a frame for image
    * window output.
    * @param[in] A frame.
   */
-  void feedFrame(const cv::Mat &) override;
+  void feedFrame(const cv::Mat&) override;
 
   /**
    * @brief Decorate frame according to detection result
@@ -55,82 +55,88 @@
    * the license plate detection result.
    * @param[in] A license plate detection result object.
    */
-  void accept(
-    const std::vector<dynamic_vino_lib::LicensePlateDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::LicensePlateDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the vehicle attributes detection result.
    * @param[in] A vehicle attributes detection result object.
   */
-  void accept(
-    const std::vector<dynamic_vino_lib::VehicleAttribsDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::VehicleAttribsDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the face reidentification result.
    * @param[in] A face reidentification result object.
   */
-  void accept(
-    const std::vector<dynamic_vino_lib::FaceReidentificationResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::FaceReidentificationResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the landmarks detection result.
    * @param[in] A landmarks detection result object.
   */
-  void accept(
-    const std::vector<dynamic_vino_lib::LandmarksDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::LandmarksDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the person attributes detection result.
    * @param[in] A person attributes detection result object.
   */
-  void accept(
-    const std::vector<dynamic_vino_lib::PersonAttribsDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::PersonAttribsDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the person reidentification result.
    * @param[in] A person reidentification result object.
   */
-  void accept(
-    const std::vector<dynamic_vino_lib::PersonReidentificationResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::PersonReidentificationResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the object segmentation result.
    * @param[in] An object segmentation result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::ObjectSegmentationResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationResult>&) override;
+  /**
+   * @brief Generate image window output content according to
+   * the object segmentation maskrcnn result.
+   * @param[in] An object segmentation result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult>&) override;
+  /**
+   * @brief Generate image window output content according to
+   * the object segmentation instance result.
+   * @param[in] An object segmentation result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the face detection result.
    * @param[in] A face detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::FaceDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::FaceDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the object detection result.
    * @param[in] results A bundle of object detection results.
   */
-  void accept(const std::vector<dynamic_vino_lib::ObjectDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::ObjectDetectionResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the emotion detection result.
    * @param[in] An emotion detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::EmotionsResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::EmotionsResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the age and gender detection result.
    * @param[in] An age and gender detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::AgeGenderResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::AgeGenderResult>&) override;
   /**
    * @brief Generate image window output content according to
    * the headpose detection result.
    * @param[in] A head pose detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::HeadPoseResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::HeadPoseResult>&) override;
 
 private:
-  unsigned findOutput(const cv::Rect &);
+  unsigned findOutput(const cv::Rect&);
   /**
    * @brief Calculate the axes of the coordinates for showing
    * the image window.
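BaseOutput above declares one empty virtual accept() per result type, and each concrete output (ImageWindowOutput here) overrides only the overloads it can render; unsupported result types silently fall through to the base no-ops. A compressed, self-contained sketch of that visitor-style dispatch with simplified toy types (none of these names are from the toolkit):

#include <iostream>
#include <string>
#include <vector>

struct FaceResult { std::string label; };
struct EmotionResult { std::string emotion; };

// Toy reduction of BaseOutput: a no-op accept() overload per result type.
struct ToyBaseOutput
{
  virtual ~ToyBaseOutput() = default;
  virtual void accept(const std::vector<FaceResult>&) {}     // default: ignore
  virtual void accept(const std::vector<EmotionResult>&) {}  // default: ignore
  virtual void handleOutput() = 0;
};

struct ToyConsoleOutput : ToyBaseOutput
{
  using ToyBaseOutput::accept;  // keep the base no-op overloads visible
  void accept(const std::vector<FaceResult>& faces) override
  {
    for (const auto& f : faces) std::cout << "face: " << f.label << "\n";
  }
  void handleOutput() override {}  // drawing/flushing happens here in the real code
};

int main()
{
  ToyConsoleOutput out;
  out.accept(std::vector<FaceResult>{ { "person" } });  // picked by overload
  out.accept(std::vector<EmotionResult>{});             // falls back to the no-op
  out.handleOutput();
}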
@@ -144,33 +150,34 @@ class ImageWindowOutput : public BaseOutput
    */
   cv::Mat getRotationTransform(double yaw, double pitch, double roll);
 
-  void mergeMask(const std::vector<dynamic_vino_lib::ObjectSegmentationResult> &);
+  void mergeMask(const std::vector<openvino_wrapper_lib::ObjectSegmentationResult>&);
+  void mergeMask(const std::vector<openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult>&);
+  void mergeMask(const std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>&);
 
   struct OutputData
   {
     std::string desc;
     cv::Rect rect;
     cv::Scalar scalar;
-    cv::Point hp_cp;  // for headpose, center point
-    cv::Point hp_x;  // for headpose, end point of xAxis
-    cv::Point hp_y;  // for headpose, end point of yAxis
-    cv::Point hp_zs;  // for headpose, start point of zAxis
-    cv::Point hp_ze;  // for headpose, end point of zAxis
-    cv::Point pa_top;  // for person attributes, top position
-    cv::Point pa_bottom;  //for person attributes, bottom position
+    cv::Point hp_cp;      // for headpose, center point
+    cv::Point hp_x;       // for headpose, end point of xAxis
+    cv::Point hp_y;       // for headpose, end point of yAxis
+    cv::Point hp_zs;      // for headpose, start point of zAxis
+    cv::Point hp_ze;      // for headpose, end point of zAxis
+    cv::Point pa_top;     // for person attributes, top position
+    cv::Point pa_bottom;  // for person attributes, bottom position
     std::vector<cv::Point> landmarks;
   };
 
   std::vector<OutputData> outputs_;
   float focal_length_;
   cv::Mat camera_matrix_;
-  std::vector<std::vector<short>> colors_ = {
-    {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190},
-    {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152},
-    {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0},
-    {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111},
-    {81, 0, 81}
-  };
+  std::vector<std::vector<short>> colors_ = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 },
+                                              { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 },
+                                              { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 },
+                                              { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 },
+                                              { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 },
+                                              { 81, 0, 81 } };
 };
 }  // namespace Outputs
-#endif  // DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp
similarity index 61%
rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp
index 5392792d..33a65582 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,18 +17,18 @@
  * @file ros_topic_output.hpp
  */
 
-#ifndef DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
-#define DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
+#define OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
 
 #include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
 
 #include
 #include
@@ -36,8 +36,8 @@
 #include
 #include
 
-#include "dynamic_vino_lib/inferences/face_detection.hpp"
-#include "dynamic_vino_lib/outputs/ros_topic_output.hpp"
+#include "openvino_wrapper_lib/inferences/face_detection.hpp"
+#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp"
 
 namespace Outputs
 {
@@ -48,25 +48,28 @@ namespace Outputs
 class RosServiceOutput : public RosTopicOutput
 {
 public:
-  explicit RosServiceOutput(std::string output_name)
-  : RosTopicOutput(output_name) {}
+  explicit RosServiceOutput(std::string output_name) : RosTopicOutput(output_name)
+  {
+  }
   /**
    * @brief Publish all the detected information generated by the accept
    * functions with ros topic.
   */
-  void handleOutput() override {}
+  void handleOutput() override
+  {
+  }
   void clearData() override;
 
   void setServiceResponse(std::shared_ptr response);
   void setResponseForFace(std::shared_ptr response);
-  void setServiceResponse(std::shared_ptr response);
-  void setServiceResponse(std::shared_ptr response);
-  void setServiceResponse(std::shared_ptr response);
-  void setServiceResponse(std::shared_ptr response);
+  void setServiceResponse(std::shared_ptr response);
+  void setServiceResponse(std::shared_ptr response);
+  void setServiceResponse(std::shared_ptr response);
+  void setServiceResponse(std::shared_ptr response);
 
 private:
   const std::string service_name_;
 };
 }  // namespace Outputs
-#endif  // DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_
diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp
new file mode 100644
index 00000000..8a85249c
--- /dev/null
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp
@@ -0,0 +1,188 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief A header file with declaration for RosTopicOutput Class
+ * @file ros_topic_output.hpp
+ */
+
+#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_
+#define OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "openvino_wrapper_lib/inferences/face_detection.hpp"
+#include "openvino_wrapper_lib/outputs/base_output.hpp"
+
+namespace Outputs
+{
+/**
+ * @class RosTopicOutput
+ * @brief This class handles and publishes the detection results with ros topics.
+ */
+class RosTopicOutput : public BaseOutput
+{
+public:
+  explicit RosTopicOutput(std::string output_name_, const rclcpp::Node::SharedPtr node = nullptr);
+  /**
+   * @brief Calculate the camera matrix of a frame.
+   * @param[in] A frame.
+   */
+  void feedFrame(const cv::Mat&) override;
+  /**
+   * @brief Publish all the detected information generated by the accept
+   * functions with ros topic.
+   */
+  void handleOutput() override;
+  /**
+   * @brief Generate ros topic information according to
+   * the license plate detection result.
+   * @param[in] results a bundle of license plate detection results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::LicensePlateDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the vehicle attributes detection result.
+   * @param[in] results a bundle of vehicle attributes detection results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::VehicleAttribsDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the face reidentification result.
+   * @param[in] results a bundle of face reidentification results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::FaceReidentificationResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the landmarks detection result.
+   * @param[in] results a bundle of landmarks detection results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::LandmarksDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the person attributes detection result.
+   * @param[in] results a bundle of person attributes detection results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::PersonAttribsDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the person reidentification result.
+   * @param[in] results a bundle of person reidentification results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::PersonReidentificationResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the object segmentation result.
+   * @param[in] results a bundle of object segmentation results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the object segmentation result.
+   * @param[in] results a bundle of object segmentation maskrcnn results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the object segmentation result.
+   * @param[in] results a bundle of object segmentation instance results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the object detection result.
+   * @param[in] results a bundle of object detection results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the face detection result.
+   * @param[in] A face detection result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::FaceDetectionResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the emotion detection result.
+   * @param[in] An emotion detection result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::EmotionsResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the age gender detection result.
+   * @param[in] An age and gender detection result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::AgeGenderResult>&) override;
+  /**
+   * @brief Generate ros topic information according to
+   * the headpose detection result.
+   * @param[in] A head pose detection result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::HeadPoseResult>&) override;
+
+protected:
+  const std::string topic_name_;
+  std::shared_ptr<rclcpp::Node> node_;
+  rclcpp::Publisher::SharedPtr pub_license_plate_;
+  std::shared_ptr license_plate_topic_;
+  rclcpp::Publisher::SharedPtr pub_vehicle_attribs_;
+  std::shared_ptr vehicle_attribs_topic_;
+  rclcpp::Publisher::SharedPtr pub_landmarks_;
+  std::shared_ptr landmarks_topic_;
+  rclcpp::Publisher::SharedPtr pub_face_reid_;
+  std::shared_ptr face_reid_topic_;
+  rclcpp::Publisher::SharedPtr pub_person_attribs_;
+  std::shared_ptr person_attribs_topic_;
+  rclcpp::Publisher::SharedPtr pub_person_reid_;
+  std::shared_ptr person_reid_topic_;
+  rclcpp::Publisher::SharedPtr pub_segmented_object_;
+  std::shared_ptr segmented_objects_topic_;
+  rclcpp::Publisher::SharedPtr pub_detected_object_;
+  std::shared_ptr detected_objects_topic_;
+  rclcpp::Publisher::SharedPtr pub_face_;
+  std::shared_ptr faces_topic_;
+  rclcpp::Publisher::SharedPtr pub_emotion_;
+  std::shared_ptr emotions_topic_;
+  rclcpp::Publisher::SharedPtr pub_age_gender_;
+  std::shared_ptr age_gender_topic_;
+  rclcpp::Publisher::SharedPtr pub_headpose_;
+  std::shared_ptr headpose_topic_;
+};
+}  // namespace Outputs
+#endif  // OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp
similarity index 57%
rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp
index 359f8313..3f59fd4d 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -13,12 +13,12 @@
 // limitations under the License.
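RosTopicOutput above pairs one rclcpp publisher with one cached message per result type (the template arguments of the publisher members were lost in extraction and are left as-is). The general rclcpp pattern is: create the publisher in the constructor, fill the cached message in accept(), and publish in handleOutput(). A self-contained sketch using std_msgs::msg::String as a stand-in, since the toolkit's own message types are not shown here:

#include <memory>
#include <string>
#include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/string.hpp>

// Minimal publisher-per-topic pattern behind RosTopicOutput-style classes.
class ToyTopicOutput
{
public:
  explicit ToyTopicOutput(const rclcpp::Node::SharedPtr& node)
    : node_(node),
      pub_(node->create_publisher<std_msgs::msg::String>("/openvino/results", 16))
  {
  }

  void accept(const std::string& result)  // cache results as they arrive
  {
    msg_ = std::make_shared<std_msgs::msg::String>();
    msg_->data = result;
  }

  void handleOutput()  // publish whatever accept() cached, then clear it
  {
    if (msg_) {
      pub_->publish(*msg_);
      msg_.reset();
    }
  }

private:
  rclcpp::Node::SharedPtr node_;
  rclcpp::Publisher<std_msgs::msg::String>::SharedPtr pub_;
  std::shared_ptr<std_msgs::msg::String> msg_;
};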
 /**
-* @brief A header file with declaration for RvizOutput Class
-* @file rviz_output.h
-*/
+ * @brief A header file with declaration for RvizOutput Class
+ * @file rviz_output.h
+ */
 
-#ifndef DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
-#define DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
+#define OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
 
 #include
 #include
@@ -26,8 +26,8 @@
 #include
 #include
 
-#include "dynamic_vino_lib/outputs/base_output.hpp"
-#include "dynamic_vino_lib/outputs/image_window_output.hpp"
+#include "openvino_wrapper_lib/outputs/base_output.hpp"
+#include "openvino_wrapper_lib/outputs/image_window_output.hpp"
 
 namespace Outputs
 {
@@ -38,12 +38,12 @@ namespace Outputs
 class RvizOutput : public BaseOutput
 {
 public:
-  explicit RvizOutput(std::string output_name, const rclcpp::Node::SharedPtr node=nullptr);
+  explicit RvizOutput(std::string output_name, const rclcpp::Node::SharedPtr node = nullptr);
   /**
    * @brief Construct frame for rviz
    * @param[in] A frame.
   */
-  void feedFrame(const cv::Mat &) override;
+  void feedFrame(const cv::Mat&) override;
   /**
    * @brief Show all the contents generated by the accept
    * functions with rviz.
@@ -54,61 +54,73 @@ class RvizOutput : public BaseOutput
    * the face reidentification result.
    * @param[in] A face reidentification result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::FaceReidentificationResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::FaceReidentificationResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the landmarks detection result.
    * @param[in] A landmarks detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::LandmarksDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::LandmarksDetectionResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the person attributes detection result.
    * @param[in] A person attributes detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::PersonAttribsDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::PersonAttribsDetectionResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the person reidentification result.
    * @param[in] A person reidentification result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::PersonReidentificationResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::PersonReidentificationResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the face detection result.
    * @param[in] A face detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::FaceDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::FaceDetectionResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the object detection result.
    * @param[in] results A bundle of object detection results.
   */
-  void accept(const std::vector<dynamic_vino_lib::ObjectDetectionResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::ObjectDetectionResult>&) override;
   /**
-   * @brief Generate rviz output content according to
-   * the object segmentation result.
-   * @param[in] results A bundle of object segmentation results.
-   */
-  void accept(const std::vector<dynamic_vino_lib::ObjectSegmentationResult> &) override;
+   * @brief Generate rviz output content according to
+   * the object segmentation result.
+   * @param[in] results A bundle of object segmentation results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationResult>&) override;
+  /**
+   * @brief Generate rviz output content according to
+   * the object segmentation result.
+   * @param[in] results A bundle of object segmentation maskrcnn results.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult>&) override;
+  /**
+   * @brief Generate rviz output content according to
+   * the object segmentation instance result.
+   * @param[in] An object segmentation result object.
+   */
+  void accept(const std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the emotion detection result.
    * @param[in] An emotion detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::EmotionsResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::EmotionsResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the age and gender detection result.
    * @param[in] An age and gender detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::AgeGenderResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::AgeGenderResult>&) override;
   /**
    * @brief Generate rviz output content according to
    * the headpose detection result.
    * @param[in] A head pose detection result object.
   */
-  void accept(const std::vector<dynamic_vino_lib::HeadPoseResult> &) override;
+  void accept(const std::vector<openvino_wrapper_lib::HeadPoseResult>&) override;
 
 private:
   std::shared_ptr<rclcpp::Node> node_;
@@ -117,4 +129,4 @@ class RvizOutput : public BaseOutput
   std::shared_ptr<Outputs::ImageWindowOutput> image_window_output_;
 };
 }  // namespace Outputs
-#endif  // DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp
similarity index 74%
rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp
index 210b4e96..ebd3e99e 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
  * @brief a header file with declaration of Pipeline class
  * @file pipeline.h
 */
-#ifndef DYNAMIC_VINO_LIB__PIPELINE_HPP_
-#define DYNAMIC_VINO_LIB__PIPELINE_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_HPP_
+#define OPENVINO_WRAPPER_LIB__PIPELINE_HPP_
 
 #include
 #include
@@ -27,11 +27,10 @@
 #include
 #include
 
-#include "dynamic_vino_lib/inferences/base_inference.hpp"
-#include "dynamic_vino_lib/inputs/standard_camera.hpp"
-#include "dynamic_vino_lib/outputs/base_output.hpp"
-#include "dynamic_vino_lib/pipeline_params.hpp"
-// #include "dynamic_vino_lib/pipeline_filters.hpp"
+#include "openvino_wrapper_lib/inferences/base_inference.hpp"
+#include "openvino_wrapper_lib/inputs/standard_camera.hpp"
+#include "openvino_wrapper_lib/outputs/base_output.hpp"
+#include "openvino_wrapper_lib/pipeline_params.hpp"
 #include "opencv2/opencv.hpp"
 
 /**
@@ -43,14 +42,14 @@
 class Pipeline
 {
 public:
-  explicit Pipeline(const std::string & name = "pipeline");
+  explicit Pipeline(const std::string& name = "pipeline");
   /**
    * @brief Add input device to the pipeline.
    * @param[in] name name of the current input device.
    * @param[in] input_device the input device instance to be added.
    * @return whether the add operation is successful
   */
-  bool add(const std::string & name, std::shared_ptr<Input::BaseInputDevice> input_device);
+  bool add(const std::string& name, std::shared_ptr<Input::BaseInputDevice> input_device);
   /**
    * @brief Add inference network to the pipeline.
    * @param[in] parent name of the parent device or inference.
@@ -58,9 +57,8 @@
    * @param[in] inference the inference instance to be added.
    * @return whether the add operation is successful
   */
-  bool add(
-    const std::string & parent, const std::string & name,
-    std::shared_ptr<dynamic_vino_lib::BaseInference> inference);
+  bool add(const std::string& parent, const std::string& name,
+           std::shared_ptr<openvino_wrapper_lib::BaseInference> inference);
   /**
    * @brief Add output device to the pipeline.
    * @param[in] parent name of the parent inference.
@@ -68,31 +66,29 @@
    * @param[in] output the output instance to be added.
    * @return whether the add operation is successful
   */
-  bool add(
-    const std::string & parent, const std::string & name,
-    std::shared_ptr<Outputs::BaseOutput> output);
+  bool add(const std::string& parent, const std::string& name, std::shared_ptr<Outputs::BaseOutput> output);
 
-  bool add(const std::string & name, std::shared_ptr<Outputs::BaseOutput> output);
-  void addConnect(const std::string & parent, const std::string & name);
+  bool add(const std::string& name, std::shared_ptr<Outputs::BaseOutput> output);
+  void addConnect(const std::string& parent, const std::string& name);
   // inline void addFilters(const std::vector& filters)
   // {
   //   filters_.add(filters);
   // }
-  bool add(const std::string & name, std::shared_ptr<dynamic_vino_lib::BaseInference> inference);
+  bool add(const std::string& name, std::shared_ptr<openvino_wrapper_lib::BaseInference> inference);
   /**
    * @brief Add inference network-output device edge to the pipeline.
    * @param[in] parent name of the parent inference.
    * @param[in] name name of the current output device.
    * @return whether the add operation is successful
   */
-  bool add(const std::string & parent, const std::string & name);
+  bool add(const std::string& parent, const std::string& name);
   /**
    * @brief Do the inference once.
    * Data flow from input device to inference network, then to output device.
   */
   void runOnce();
-  void callback(const std::string & detection_name);
+  void callback(const std::string& detection_name);
   /**
    * @brief Set the inference network to call the callback function as soon as
    * each inference is
@@ -121,14 +117,14 @@
     return next_;
   }
   /**
-  * @brief Get real time FPS (frames per second).
-  */
+   * @brief Get real time FPS (frames per second).
+   */
   int getFPS() const
   {
     return fps_;
   }
 
-  std::string findFilterConditions(const std::string & input, const std::string & output)
+  std::string findFilterConditions(const std::string& input, const std::string& output)
   {
     return params_->findFilterConditions(input, output);
   }
@@ -151,12 +147,11 @@
   const int kCatagoryOrder_Output = 3;
 
   std::shared_ptr<PipelineParams> params_;
-  // PipelineFilters filters_;
   std::shared_ptr<Input::BaseInputDevice> input_device_;
   std::string input_device_name_;
   std::multimap<std::string, std::string> next_;
-  std::map<std::string, std::shared_ptr<dynamic_vino_lib::BaseInference>> name_to_detection_map_;
+  std::map<std::string, std::shared_ptr<openvino_wrapper_lib::BaseInference>> name_to_detection_map_;
   std::map<std::string, std::shared_ptr<Outputs::BaseOutput>> name_to_output_map_;
   int total_inference_ = 0;
   std::set<std::string> output_names_;
@@ -172,4 +167,4 @@
   std::chrono::time_point t_start_;
 };
 
-#endif  // DYNAMIC_VINO_LIB__PIPELINE_HPP_
+#endif  // OPENVINO_WRAPPER_LIB__PIPELINE_HPP_
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp
similarity index 52%
rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp
index 06c583c5..54de6ab5 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@
  * @brief a header file with declaration of Pipeline Manager class
 * @file pipeline_manager.hpp
 */
-#ifndef DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_
-#define DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_
+#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_
+#define OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_
 
-#include <inference_engine.hpp>
+#include <openvino/openvino.hpp>
 #include
 #include
 #include
@@ -28,8 +28,8 @@
 #include
 #include
 #include
-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/engines/engine_manager.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/engines/engine_manager.hpp"
 
 /**
  * @class PipelineManager
@@ -39,24 +39,21 @@ class PipelineManager
 {
 public:
   /**
-  * @brief Get the singleton instance of PipelineManager class.
-  * The instance will be created when first call.
-  * @return The reference of PipelineManager instance.
-  */
-  static PipelineManager & getInstance()
+   * @brief Get the singleton instance of PipelineManager class.
+   * The instance will be created on first call.
+   * @return The reference of PipelineManager instance.
+ */ + static PipelineManager& getInstance() { static PipelineManager manager_; return manager_; } - std::shared_ptr createPipeline( - const Params::ParamManager::PipelineRawData & params, - rclcpp::Node::SharedPtr node = nullptr); + std::shared_ptr createPipeline(const Params::ParamManager::PipelineRawData& params, + rclcpp::Node::SharedPtr node = nullptr); - void removePipeline(const std::string & name); - PipelineManager & updatePipeline( - const std::string & name, - const Params::ParamManager::PipelineRawData & params); + void removePipeline(const std::string& name); + PipelineManager& updatePipeline(const std::string& name, const Params::ParamManager::PipelineRawData& params); void runAll(); void stopAll(); @@ -85,7 +82,6 @@ class PipelineManager struct ServiceData { std::shared_ptr thread; - // std::shared_ptr node; PipelineState state; }; @@ -94,7 +90,7 @@ class PipelineManager return pipelines_; } - std::map * getPipelinesPtr() + std::map* getPipelinesPtr() { return &pipelines_; } @@ -103,43 +99,45 @@ class PipelineManager PipelineManager() { } - PipelineManager(PipelineManager const &); - void operator=(PipelineManager const &); - void threadPipeline(const char * name); - void threadSpinNodes(const char * name); - std::map> - parseInputDevice(const PipelineData & params); - std::map> - parseOutput(const PipelineData & pdata); - std::map> - parseInference(const Params::ParamManager::PipelineRawData & params); - std::shared_ptr - createFaceDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createEmotionRecognition(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createObjectDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createPersonReidentification(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createPersonAttribsDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createLandmarksDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createFaceReidentification(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr - createLicensePlateDetection(const Params::ParamManager::InferenceRawData & infer); + PipelineManager(PipelineManager const&); + void operator=(PipelineManager const&); + void threadPipeline(const char* name); + void threadSpinNodes(const char* name); + std::map> parseInputDevice(const PipelineData& params); + std::map> parseOutput(const PipelineData& pdata); + std::map> + parseInference(const Params::ParamManager::PipelineRawData& params); + std::shared_ptr + createFaceDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createAgeGenderRecognition(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createEmotionRecognition(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createHeadPoseEstimation(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectSegmentation(const 
Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectSegmentationMaskrcnn(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectSegmentationInstance(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createPersonReidentification(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createPersonAttribsDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createLandmarksDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createFaceReidentification(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createLicensePlateDetection(const Params::ParamManager::InferenceRawData& infer); std::map pipelines_; ServiceData service_; Engines::EngineManager engine_manager_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp similarity index 69% rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp index 9de08354..f7171e4f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
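// ---------------------------------------------------------------------------
// A minimal usage sketch for the renamed PipelineManager API above, assuming
// the YAML parameters are already loaded and that ParamManager exposes a
// getPipelines() accessor (an assumption; only createPipeline(), runAll(),
// stopAll() and removePipeline() are confirmed by this patch):

#include <rclcpp/rclcpp.hpp>
#include "openvino_wrapper_lib/pipeline_manager.hpp"

void run_all_pipelines_sketch(rclcpp::Node::SharedPtr node)
{
  for (auto & pipeline_params : Params::ParamManager::getInstance().getPipelines()) {
    // Wires input device -> inference(s) -> output(s) for one named pipeline.
    PipelineManager::getInstance().createPipeline(pipeline_params, node);
  }
  PipelineManager::getInstance().runAll();  // one worker thread per pipeline
}
// ---------------------------------------------------------------------------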
@@ -16,10 +16,10 @@ * @brief a header file with declaration of Pipeline class * @file pipeline_params.hpp */ -#ifndef DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ -#define DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ +#define OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ -#include +#include #include #include #include @@ -28,9 +28,9 @@ #include #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inputs/standard_camera.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" #include "opencv2/opencv.hpp" const char kInputType_Image[] = "Image"; @@ -53,8 +53,13 @@ const char kInferTpye_EmotionRecognition[] = "EmotionRecognition"; const char kInferTpye_HeadPoseEstimation[] = "HeadPoseEstimation"; const char kInferTpye_ObjectDetection[] = "ObjectDetection"; const char kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; +const char kInferTpye_ObjectSegmentationMaskrcnn[] = "ObjectSegmentationMaskrcnn"; +const char kInferTpye_ObjectSegmentationInstance[] = "ObjectSegmentationInstance"; +const char kInferTpye_ObjectSegmentationTypeYolo[] = "yolo"; +const char kInferTpye_ObjectSegmentationTypeMaskrcnn[] = "maskrcnn"; const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; -const char kInferTpye_ObjectDetectionTypeYolov2[] = "yolov2"; +const char kInferTpye_ObjectDetectionTypeYolov5[] = "yolov5"; +const char kInferTpye_ObjectDetectionTypeYolov8[] = "yolov8"; const char kInferTpye_PersonReidentification[] = "PersonReidentification"; const char kInferTpye_PersonAttribsDetection[] = "PersonAttribsDetection"; const char kInferTpye_LandmarksDetection[] = "LandmarksDetection"; @@ -70,18 +75,18 @@ const char kInferTpye_LicensePlateDetection[] = "LicensePlateDetection"; class PipelineParams { public: - explicit PipelineParams(const std::string & name); - explicit PipelineParams(const Params::ParamManager::PipelineRawData & params); - Params::ParamManager::PipelineRawData getPipeline(const std::string & name); - PipelineParams & operator=(const Params::ParamManager::PipelineRawData & params); + explicit PipelineParams(const std::string& name); + explicit PipelineParams(const Params::ParamManager::PipelineRawData& params); + Params::ParamManager::PipelineRawData getPipeline(const std::string& name); + PipelineParams& operator=(const Params::ParamManager::PipelineRawData& params); void update(); - void update(const Params::ParamManager::PipelineRawData & params); - bool isOutputTo(std::string & name); + void update(const Params::ParamManager::PipelineRawData& params); + bool isOutputTo(std::string& name); bool isGetFps(); - std::string findFilterConditions(const std::string & input, const std::string & output); + std::string findFilterConditions(const std::string& input, const std::string& output); private: Params::ParamManager::PipelineRawData params_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#endif // OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp similarity index 53% rename from dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp index 
056cb179..9ce75c1f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,22 +11,22 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#ifndef DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ -#define DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#define OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include -#include -#include -#include +#include +#include +#include #include #include @@ -36,19 +36,15 @@ namespace vino_service { -template +template class FrameProcessingServer : public rclcpp::Node { public: - explicit FrameProcessingServer( - const std::string & service_name, - const std::string & config_path); - void initService(const std::string & config_path); + explicit FrameProcessingServer(const std::string& service_name, const std::string& config_path); + void initService(const std::string& config_path); private: - void cbService( - const std::shared_ptr request, - std::shared_ptr response); + void cbService(const std::shared_ptr request, std::shared_ptr response); // rclcpp::Service::SharedPtr service_; std::shared_ptr> service_; @@ -56,4 +52,4 @@ class FrameProcessingServer : public rclcpp::Node std::string config_path_; }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp similarity index 51% rename from dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp index f7f41141..3276bd41 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
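// ---------------------------------------------------------------------------
// FrameProcessingServer above is a templated rclcpp::Node that answers one
// inference request per service call. A hedged sketch of standing one up; the
// srv type is a placeholder (the concrete template argument is elided in this
// hunk) and the node name and config path are hypothetical:

#include <memory>
#include <string>

template <typename SrvT>
auto make_frame_service_sketch(const std::string & config_path)
{
  // Ctor signature per the declaration above: (service_name, config_path);
  // initService(config_path) presumably builds the pipeline it serves.
  return std::make_shared<vino_service::FrameProcessingServer<SrvT>>(
      "frame_processing_server", config_path);
}
// ---------------------------------------------------------------------------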
-#ifndef DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ -#define DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#define OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include @@ -27,29 +27,24 @@ namespace vino_service { -template +template class PipelineProcessingServer : public rclcpp::Node { public: - explicit PipelineProcessingServer( - const std::string & service_name); + explicit PipelineProcessingServer(const std::string& service_name); private: void initPipelineService(); - // bool cbService(ros::ServiceEvent& event); - void cbService( - const std::shared_ptr request, - std::shared_ptr response); + void cbService(const std::shared_ptr request, std::shared_ptr response); - void setResponse( - std::shared_ptr response); + void setResponse(std::shared_ptr response); void setPipelineByRequest(std::string pipeline_name, PipelineManager::PipelineState state); std::shared_ptr> service_; - std::map * pipelines_; + std::map* pipelines_; std::string service_name_; }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp new file mode 100644 index 00000000..d1fb8db4 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp @@ -0,0 +1,177 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with logging facility for common samples + * @file slog.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__SLOG_HPP_ +#define OPENVINO_WRAPPER_LIB__SLOG_HPP_ + +#pragma once + +#include +#include + +namespace slog +{ +#if 1 +enum COLOR +{ + RESET = 0, + BLUE = 1, + GREEN = 2, + YELLOW = 3, + RED = 4, +}; + +#else +// the following are UBUNTU/LINUX ONLY terminal color codes. 
+#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ +#endif + +/** + * @class LogStreamEndLine + * @brief The LogStreamEndLine class implements an end line marker for a log + * stream + */ +class LogStreamEndLine +{ +}; + +static constexpr LogStreamEndLine endl; + +/** + * @class LogStream + * @brief The LogStream class implements a stream for sample logging + */ +class LogStream +{ + std::string _prefix; + std::ostream* _log_stream; + bool _new_line; + int _color_id; + +public: + /** + * @brief A constructor. Creates an LogStream object + * @param prefix The prefix to print + */ + LogStream(const std::string& prefix, std::ostream& log_stream, const int color_id = -1) + : _prefix(prefix), _new_line(true), _color_id(color_id) + { + _log_stream = &log_stream; + } + + /** + * @brief A stream output operator to be used within the logger + * @param arg Object for serialization in the logger message + */ + template + LogStream& operator<<(const T& arg) + { + if (_new_line) { + setLineColor(); + (*_log_stream) << "[ " << _prefix << " ] "; + _new_line = false; + } + + (*_log_stream) << arg; + return *this; + } + + // Specializing for LogStreamEndLine to support slog::endl + LogStream& operator<<(const LogStreamEndLine& arg) + { + _new_line = true; + resetLineColor(); + (*_log_stream) << std::endl; + return *this; + } + + void setLineColor() + { + switch (_color_id) { + case BLUE: + (*_log_stream) << "\033[34m"; + break; + case GREEN: + (*_log_stream) << "\033[32m"; + break; + case YELLOW: + (*_log_stream) << "\033[33m"; + break; + case RED: + (*_log_stream) << "\033[31m"; + break; + default: + break; + } + } + + void resetLineColor() + { + if (_color_id > 0) { + (*_log_stream) << "\033[0m"; // RESET + } + } +}; + +class NullStream +{ +public: + NullStream() + { + } + + NullStream(const std::string& prefix, std::ostream& log_stream) + { + (void)prefix; + (void)log_stream; + } + + template + NullStream& operator<<(const T& arg) + { + return *this; + } +}; + +#ifdef LOG_LEVEL_DEBUG +static LogStream debug("DEBUG", std::cout, GREEN); +#else +static NullStream debug; +#endif +static LogStream info("INFO", std::cout, BLUE); +static LogStream warn("WARNING", std::cout, YELLOW); +static LogStream err("ERROR", std::cerr, RED); + +} // namespace slog +#endif // OPENVINO_WRAPPER_LIB__SLOG_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/common.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/common.hpp new file mode 100644 index 00000000..081aa496 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/common.hpp @@ -0,0 +1,382 @@ +// Copyright (C) 2018-2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +/** + * @brief a header file with common samples functionality + * @file common.hpp + */ + +#pragma once + +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef UNUSED +#ifdef _WIN32 +#define UNUSED +#else +#define UNUSED __attribute__((unused)) +#endif +#endif + +template +constexpr std::size_t arraySize(const T (&)[N]) noexcept +{ + return N; +} + +// Helpers to print IE version information. +// We don't directly define operator<< for InferenceEngine::Version +// and such, because that won't get picked up by argument-dependent lookup +// due to not being in the same namespace as the Version class itself. +// We need ADL to work in order to print these objects using slog. +// So instead, we define wrapper classes and operator<< for those classes. + +class PrintableIeVersion +{ +public: + using ref_type = const InferenceEngine::Version&; + + PrintableIeVersion(ref_type version) : version(version) + { + } + + friend std::ostream& operator<<(std::ostream& os, const PrintableIeVersion& p) + { + ref_type version = p.version; + + return os << "\t" << version.description << " version ......... " << IE_VERSION_MAJOR << "." << IE_VERSION_MINOR + << "\n\tBuild ........... " << IE_VERSION_PATCH; + } + +private: + ref_type version; +}; + +inline PrintableIeVersion printable(PrintableIeVersion::ref_type version) +{ + return { version }; +} + +class PrintableIeVersionMap +{ +public: + using ref_type = const std::map&; + + PrintableIeVersionMap(ref_type versions) : versions(versions) + { + } + + friend std::ostream& operator<<(std::ostream& os, const PrintableIeVersionMap& p) + { + ref_type versions = p.versions; + + for (const auto& version : versions) { + os << "\t" << version.first << std::endl << printable(version.second) << std::endl; + } + + return os; + } + +private: + ref_type versions; +}; + +inline PrintableIeVersionMap printable(PrintableIeVersionMap::ref_type versions) +{ + return { versions }; +} + +/** + * @class Color + * @brief A Color class stores channels of a given color + */ +class Color +{ +private: + unsigned char _r; + unsigned char _g; + unsigned char _b; + +public: + /** + * A default constructor. 
+ * @param r - value for red channel + * @param g - value for green channel + * @param b - value for blue channel + */ + Color(unsigned char r, unsigned char g, unsigned char b) : _r(r), _g(g), _b(b) + { + } + + inline unsigned char red() const + { + return _r; + } + + inline unsigned char blue() const + { + return _b; + } + + inline unsigned char green() const + { + return _g; + } +}; + +// Known colors for training classes from the Cityscapes dataset +static UNUSED const Color CITYSCAPES_COLORS[] = { + { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, { 153, 153, 190 }, { 153, 153, 153 }, + { 30, 170, 250 }, { 0, 220, 220 }, { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, { 90, 0, 0 }, { 230, 0, 0 }, + { 32, 11, 119 }, { 0, 74, 111 }, { 81, 0, 81 } +}; + +static std::vector> +perfCountersSorted(std::map perfMap) +{ + using perfItem = std::pair; + std::vector sorted; + for (auto& kvp : perfMap) + sorted.push_back(kvp); + + std::stable_sort(sorted.begin(), sorted.end(), [](const perfItem& l, const perfItem& r) { + return l.second.execution_index < r.second.execution_index; + }); + + return sorted; +} + +static UNUSED void +printPerformanceCounts(const std::map& performanceMap, + std::ostream& stream, const std::string& deviceName, bool bshowHeader = true) +{ + long long totalTime = 0; + // Print performance counts + if (bshowHeader) { + stream << std::endl << "performance counts:" << std::endl << std::endl; + } + + auto performanceMapSorted = perfCountersSorted(performanceMap); + + for (const auto& it : performanceMapSorted) { + std::string toPrint(it.first); + const int maxLayerName = 30; + + if (it.first.length() >= maxLayerName) { + toPrint = it.first.substr(0, maxLayerName - 4); + toPrint += "..."; + } + + stream << std::setw(maxLayerName) << std::left << toPrint; + switch (it.second.status) { + case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: + stream << std::setw(15) << std::left << "EXECUTED"; + break; + case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: + stream << std::setw(15) << std::left << "NOT_RUN"; + break; + case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: + stream << std::setw(15) << std::left << "OPTIMIZED_OUT"; + break; + } + stream << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " "; + stream << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec); + stream << std::setw(20) << std::left << "cpu: " + std::to_string(it.second.cpu_uSec); + stream << " execType: " << it.second.exec_type << std::endl; + if (it.second.realTime_uSec > 0) { + totalTime += it.second.realTime_uSec; + } + } + stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl; + std::cout << std::endl; + std::cout << "Full device name: " << deviceName << std::endl; + std::cout << std::endl; +} + +static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request, std::ostream& stream, + std::string deviceName, bool bshowHeader = true) +{ + auto performanceMap = request.GetPerformanceCounts(); + printPerformanceCounts(performanceMap, stream, deviceName, bshowHeader); +} + +inline std::map getMapFullDevicesNames(InferenceEngine::Core& ie, + std::vector devices) +{ + std::map devicesMap; + InferenceEngine::Parameter p; + for (std::string& deviceName : devices) { + if (deviceName != "") { + try { + p = ie.GetMetric(deviceName, 
METRIC_KEY(FULL_DEVICE_NAME)); + devicesMap.insert(std::pair(deviceName, p.as())); + } catch (InferenceEngine::Exception&) { + } + } + } + return devicesMap; +} + +inline std::string getFullDeviceName(std::map& devicesMap, std::string device) +{ + std::map::iterator it = devicesMap.find(device); + if (it != devicesMap.end()) { + return it->second; + } else { + return ""; + } +} + +inline std::string getFullDeviceName(InferenceEngine::Core& ie, std::string device) +{ + InferenceEngine::Parameter p; + try { + p = ie.GetMetric(device, METRIC_KEY(FULL_DEVICE_NAME)); + return p.as(); + } catch (InferenceEngine::Exception&) { + return ""; + } +} + +inline std::size_t getTensorWidth(const InferenceEngine::TensorDesc& desc) +{ + const auto& layout = desc.getLayout(); + const auto& dims = desc.getDims(); + const auto& size = dims.size(); + if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::CHW || + layout == InferenceEngine::Layout::HW)) { + // Regardless of layout, dimensions are stored in fixed order + return dims.back(); + } else { + throw std::runtime_error("Tensor does not have width dimension"); + } + return 0; +} + +inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) +{ + const auto& layout = desc.getLayout(); + const auto& dims = desc.getDims(); + const auto& size = dims.size(); + if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::CHW || + layout == InferenceEngine::Layout::HW)) { + // Regardless of layout, dimensions are stored in fixed order + return dims.at(size - 2); + } else { + throw std::runtime_error("Tensor does not have height dimension"); + } + return 0; +} + +inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) +{ + const auto& layout = desc.getLayout(); + if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::C || layout == InferenceEngine::Layout::CHW || + layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) { + // Regardless of layout, dimensions are stored in fixed order + const auto& dims = desc.getDims(); + switch (desc.getLayoutByDims(dims)) { + case InferenceEngine::Layout::C: + return dims.at(0); + case InferenceEngine::Layout::NC: + return dims.at(1); + case InferenceEngine::Layout::CHW: + return dims.at(0); + case InferenceEngine::Layout::NCHW: + return dims.at(1); + case InferenceEngine::Layout::NCDHW: + return dims.at(1); + case InferenceEngine::Layout::SCALAR: // [[fallthrough]] + case InferenceEngine::Layout::BLOCKED: // [[fallthrough]] + default: + throw std::runtime_error("Tensor does not have channels dimension"); + } + } else { + throw std::runtime_error("Tensor does not have channels dimension"); + } + return 0; +} + +inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) +{ + const auto& layout = desc.getLayout(); + if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || layout == 
InferenceEngine::Layout::NDHWC ||
+      layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) {
+    // Regardless of layout, dimensions are stored in fixed order
+    const auto& dims = desc.getDims();
+    switch (desc.getLayoutByDims(dims)) {
+      case InferenceEngine::Layout::NC:
+        return dims.at(0);
+      case InferenceEngine::Layout::NCHW:
+        return dims.at(0);
+      case InferenceEngine::Layout::NCDHW:
+        return dims.at(0);
+      case InferenceEngine::Layout::CHW:      // [[fallthrough]]
+      case InferenceEngine::Layout::C:        // [[fallthrough]]
+      case InferenceEngine::Layout::SCALAR:   // [[fallthrough]]
+      case InferenceEngine::Layout::BLOCKED:  // [[fallthrough]]
+      default:
+        throw std::runtime_error("Tensor does not have batch dimension");
+    }
+  } else {
+    throw std::runtime_error("Tensor does not have batch dimension");
+  }
+  return 0;
+}
+
+inline void showAvailableDevices()
+{
+  InferenceEngine::Core ie;
+  std::vector<std::string> devices = ie.GetAvailableDevices();
+
+  std::cout << std::endl;
+  std::cout << "Available target devices:";
+  for (const auto& device : devices) {
+    std::cout << " " << device;
+  }
+  std::cout << std::endl;
+}
+
+inline std::string fileNameNoExt(const std::string& filepath)
+{
+  auto pos = filepath.rfind('.');
+  if (pos == std::string::npos)
+    return filepath;
+  return filepath.substr(0, pos);
+}
+
+static inline ov::Layout getLayoutFromShape(const ov::Shape& shape)
+{
+  if (shape.size() == 2) {
+    return "NC";
+  } else if (shape.size() == 3) {
+    return (shape[0] >= 1 && shape[0] <= 4) ? "CHW" : "HWC";
+  } else if (shape.size() == 4) {
+    return (shape[1] >= 1 && shape[1] <= 4) ? "NCHW" : "NHWC";
+  } else {
+    throw std::runtime_error("Unsupported " + std::to_string(shape.size()) + "D shape");
+  }
+}
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp
similarity index 84%
rename from dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp
rename to openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp
index 47947928..f0da93a3 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp
+++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
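// ---------------------------------------------------------------------------
// getLayoutFromShape() above guesses a layout purely from tensor rank, taking
// any dimension of size 1..4 in the channel slot as "channels". Worked
// examples, hand-derived from the branches above rather than captured output:

#include <cassert>
#include <openvino/openvino.hpp>

inline void layout_guess_examples()
{
  assert(getLayoutFromShape(ov::Shape{1, 1000}) == ov::Layout("NC"));
  assert(getLayoutFromShape(ov::Shape{3, 480, 640}) == ov::Layout("CHW"));      // 3 <= 4
  assert(getLayoutFromShape(ov::Shape{480, 640, 3}) == ov::Layout("HWC"));      // 480 > 4
  assert(getLayoutFromShape(ov::Shape{1, 3, 224, 224}) == ov::Layout("NCHW"));  // dim1 = 3
  assert(getLayoutFromShape(ov::Shape{1, 224, 224, 3}) == ov::Layout("NHWC"));  // dim1 = 224
  // Rank 1 or rank > 4 throws std::runtime_error("Unsupported ...").
}
// ---------------------------------------------------------------------------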
@@ -17,8 +17,8 @@ // @file mutex_counter.hpp // -#ifndef DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ -#define DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ +#define OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ #include @@ -54,4 +54,4 @@ class MutexCounter std::condition_variable cv_; }; -#endif // DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp similarity index 60% rename from dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp index abeac0c5..d9408994 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ // @file version_info.hpp // -#ifndef DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ -#define DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ +#define OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) #include #endif #include @@ -42,36 +42,26 @@ * @param s - string to trim * @return trimmed string */ -inline std::string & trim(std::string & s) +inline std::string& trim(std::string& s) { - s.erase(s.begin(), - std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); - s.erase( - std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), - s.end()); + s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), s.end()); return s; } -static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) +static std::ostream& operator<<(std::ostream& os, const ov::Version& version) { os << "\n\tAPI version ............ "; - if (nullptr == version) { - os << "UNKNOWN"; - } else { - os << version->apiVersion.major << "." << version->apiVersion.minor; - if (nullptr != version->buildNumber) { - os << "\n\t" << - "Build .................. " << version->buildNumber; - } - if (nullptr != version->description) { - os << "\n\t" << - "Description ............ " << version->description; - } - } + os << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH; + os << "\n\t" + << "Build .................. " << version.buildNumber; + os << "\n\t" + << "Description ............ 
" << version.description; + return os; } -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) /** * @class PluginVersion * @brief A PluginVersion class stores plugin version and initialization status @@ -80,7 +70,7 @@ struct PluginVersion : public InferenceEngine::Version { bool initialized = false; - explicit PluginVersion(const InferenceEngine::Version * ver) + explicit PluginVersion(const InferenceEngine::Version* ver) { if (nullptr == ver) { return; @@ -95,7 +85,7 @@ struct PluginVersion : public InferenceEngine::Version } }; -static UNUSED std::ostream & operator<<(std::ostream & os, const PluginVersion & version) +static UNUSED std::ostream& operator<<(std::ostream& os, const PluginVersion& version) { os << "\tPlugin version ......... "; if (!version) { @@ -121,12 +111,12 @@ static UNUSED std::ostream & operator<<(std::ostream & os, const PluginVersion & return os; } -inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream & stream) +inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream& stream) { - const PluginVersion * pluginVersion = nullptr; - ptr->GetVersion((const InferenceEngine::Version * &)pluginVersion); + const PluginVersion* pluginVersion = nullptr; + ptr->GetVersion((const InferenceEngine::Version*&)pluginVersion); stream << pluginVersion << std::endl; } -#endif // (defined(USE_OLD_E_PLUGIN_API)) +#endif // (defined(USE_OLD_E_PLUGIN_API)) -#endif // DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ +#endif // OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ diff --git a/dynamic_vino_lib/package.xml b/openvino_wrapper_lib/package.xml similarity index 88% rename from dynamic_vino_lib/package.xml rename to openvino_wrapper_lib/package.xml index d432eba2..bd73ac69 100644 --- a/dynamic_vino_lib/package.xml +++ b/openvino_wrapper_lib/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_lib + openvino_wrapper_lib 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -37,9 +37,8 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 openvino_common @@ -54,9 +53,8 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 ament_lint_auto diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/openvino_wrapper_lib/src/engines/engine.cpp similarity index 60% rename from dynamic_vino_lib/src/engines/engine.cpp rename to openvino_wrapper_lib/src/engines/engine.cpp index 6f16472f..e3f3865a 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/openvino_wrapper_lib/src/engines/engine.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,20 +16,17 @@ * @brief a header file with definition of Engine class * @file engine.cpp */ -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/slog.hpp" -#if(defined(USE_OLD_E_PLUGIN_API)) -Engines::Engine::Engine( - InferenceEngine::InferencePlugin plg, - const Models::BaseModel::Ptr base_model) +#if (defined(USE_OLD_E_PLUGIN_API)) +Engines::Engine::Engine(InferenceEngine::InferencePlugin plg, const Models::BaseModel::Ptr base_model) { - request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})).CreateInferRequestPtr(); + request_ = (plg.LoadNetwork(base_model->getModel()->getNetwork(), {})).CreateInferRequestPtr(); } #endif -Engines::Engine::Engine( - InferenceEngine::InferRequest::Ptr & request) +Engines::Engine::Engine(ov::InferRequest& request) { request_ = request; } diff --git a/openvino_wrapper_lib/src/engines/engine_manager.cpp b/openvino_wrapper_lib/src/engines/engine_manager.cpp new file mode 100644 index 00000000..c55d8553 --- /dev/null +++ b/openvino_wrapper_lib/src/engines/engine_manager.cpp @@ -0,0 +1,104 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
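// ---------------------------------------------------------------------------
// Engine now wraps an ov::InferRequest instead of an
// InferenceEngine::InferRequest::Ptr. A hedged sketch of producing one with
// the OpenVINO 2.0 API; this mirrors EngineManager::createEngine_V2022 just
// below, and the model path argument is a placeholder:

#include <memory>
#include <string>
#include <openvino/openvino.hpp>
#include "openvino_wrapper_lib/engines/engine.hpp"

inline std::shared_ptr<Engines::Engine> make_engine_sketch(
    const std::string & model_xml, const std::string & device)
{
  ov::Core core;
  auto model = core.read_model(model_xml);  // e.g. some IR ".xml" file
  ov::CompiledModel compiled = core.compile_model(model, device);
  ov::InferRequest request = compiled.create_infer_request();
  return std::make_shared<Engines::Engine>(request);  // ctor shown above
}
// ---------------------------------------------------------------------------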
+ +/** + * @brief a header file with definition of Engine class + * @file engine.cpp + */ +#include "openvino_wrapper_lib/engines/engine_manager.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/utils/version_info.hpp" +#include +#include +#if (defined(USE_OLD_E_PLUGIN_API)) +#include +#endif + +std::shared_ptr Engines::EngineManager::createEngine(const std::string& device, + const std::shared_ptr& model) +{ +#if (defined(USE_OLD_E_PLUGIN_API)) + return createEngine_beforeV2019R2(device, model); +#else + return createEngine_V2022(device, model); +#endif +} + +std::shared_ptr +Engines::EngineManager::createEngine_V2022(const std::string& device, const std::shared_ptr& model) +{ + ov::Core core; + ov::CompiledModel executable_network = core.compile_model(model->getModel(), device); + ov::InferRequest infer_request = executable_network.create_infer_request(); + + return std::make_shared(infer_request); +} + +#if (defined(USE_OLD_E_PLUGIN_API)) +std::shared_ptr Engines::EngineManager::createEngine_beforeV2019R2( + const std::string& device, const std::shared_ptr& model) +{ + if (plugins_for_devices_.find(device) == plugins_for_devices_.end()) { + auto pcommon = Params::ParamManager::getInstance().getCommon(); + plugins_for_devices_[device] = *makePluginByName(device, pcommon.custom_cpu_library, pcommon.custom_cldnn_library, + pcommon.enable_performance_count); + slog::info << "Created plugin for " << device << slog::endl; + } + + auto executeable_network = plugins_for_devices_[device].LoadNetwork(model->getModel()->getNetwork(), {}); + auto request = executeable_network.CreateInferRequestPtr(); + + return std::make_shared(request); +} + +std::unique_ptr +Engines::EngineManager::makePluginByName(const std::string& device_name, const std::string& custom_cpu_library_message, + const std::string& custom_cldnn_message, bool performance_message) +{ + slog::info << "Creating plugin for " << device_name << slog::endl; + + InferenceEngine::InferencePlugin plugin = + InferenceEngine::PluginDispatcher({ "../../../lib/intel64", "" }).getPluginByDevice(device_name); + + /** Printing plugin version **/ + printPluginVersion(plugin, std::cout); + + /** Load extensions for the CPU plugin **/ + if ((device_name.find("CPU") != std::string::npos)) { + plugin.AddExtension(std::make_shared()); + if (!custom_cpu_library_message.empty()) { + slog::info << "custom cpu library is not empty, tyring to use this extension:" << custom_cpu_library_message + << slog::endl; + // CPU(MKLDNN) extensions are loaded as a shared library and passed as a + // pointer to base + // extension + auto extension_ptr = InferenceEngine::make_so_pointer(custom_cpu_library_message); + plugin.AddExtension(extension_ptr); + } + } else if (!custom_cldnn_message.empty()) { + slog::info << "custom cldnn library is not empty, tyring to use this extension:" << custom_cldnn_message + << slog::endl; + // Load Extensions for other plugins not CPU + plugin.SetConfig({ { InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message } }); + } + if (performance_message) { + plugin.SetConfig( + { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } }); + } + + return std::make_unique(InferenceEngine::InferenceEnginePluginPtr(plugin)); +} +#endif diff --git a/openvino_wrapper_lib/src/inferences/age_gender_detection.cpp 
b/openvino_wrapper_lib/src/inferences/age_gender_detection.cpp new file mode 100644 index 00000000..0598df48 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/age_gender_detection.cpp @@ -0,0 +1,116 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of AgeGenderResult class + * @file age_gender_detection.cpp + */ + +#include +#include +#include +#include +#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" + +// AgeGenderResult +openvino_wrapper_lib::AgeGenderResult::AgeGenderResult(const cv::Rect& location) : Result(location) +{ +} + +// AgeGender Detection +openvino_wrapper_lib::AgeGenderDetection::AgeGenderDetection() : openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::AgeGenderDetection::~AgeGenderDetection() = default; + +void openvino_wrapper_lib::AgeGenderDetection::loadNetwork(std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::AgeGenderDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + bool succeed = openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); + if (!succeed) { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::AgeGenderDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::AgeGenderDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + auto request = getEngine()->getRequest(); + ov::Tensor gender_tensor = request.get_tensor(valid_model_->getOutputGenderName()); + ov::Tensor age_tensor = request.get_tensor(valid_model_->getOutputAgeName()); + + for (int i = 0; i < results_.size(); ++i) { + results_[i].age_ = age_tensor.data()[i] * 100; + results_[i].male_prob_ = gender_tensor.data()[i * 2 + 1]; + } + return true; +} + +int openvino_wrapper_lib::AgeGenderDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::AgeGenderDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::AgeGenderDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::AgeGenderDetection::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::AgeGenderDetection::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Age gender detection does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/base_filter.cpp b/openvino_wrapper_lib/src/inferences/base_filter.cpp similarity index 71% rename from dynamic_vino_lib/src/inferences/base_filter.cpp rename to openvino_wrapper_lib/src/inferences/base_filter.cpp index 14f2a38c..ac81271b 100644 --- a/dynamic_vino_lib/src/inferences/base_filter.cpp +++ b/openvino_wrapper_lib/src/inferences/base_filter.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,49 +17,44 @@ * @file base_filter.cpp */ -#include "dynamic_vino_lib/inferences/base_filter.hpp" +#include "openvino_wrapper_lib/inferences/base_filter.hpp" #include #include #include -dynamic_vino_lib::BaseFilter::BaseFilter() {} +openvino_wrapper_lib::BaseFilter::BaseFilter() +{ +} -bool dynamic_vino_lib::BaseFilter::isValidFilterConditions( - const std::string & filter_conditions) +bool openvino_wrapper_lib::BaseFilter::isValidFilterConditions(const std::string& filter_conditions) { return strip(filter_conditions) != ""; } -void dynamic_vino_lib::BaseFilter::acceptFilterConditions( - const std::string & filter_conditions) +void openvino_wrapper_lib::BaseFilter::acceptFilterConditions(const std::string& filter_conditions) { striped_conditions_ = strip(filter_conditions); std::vector infix_conditions = split(striped_conditions_); infixToSuffix(infix_conditions); } -bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string & str) +bool openvino_wrapper_lib::BaseFilter::isRelationOperator(const std::string& str) { - if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != - relation_operators_.end()) - { + if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != relation_operators_.end()) { return true; } return false; } -bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string & str) +bool openvino_wrapper_lib::BaseFilter::isLogicOperator(const std::string& str) { - if (std::find(logic_operators_.begin(), logic_operators_.end(), str) != - logic_operators_.end()) - { + if (std::find(logic_operators_.begin(), logic_operators_.end(), str) != logic_operators_.end()) { return true; } return false; } -bool dynamic_vino_lib::BaseFilter::isPriorTo( - const std::string & operator1, const std::string & operator2) +bool openvino_wrapper_lib::BaseFilter::isPriorTo(const std::string& operator1, const std::string& operator2) { if (isRelationOperator(operator1) && isLogicOperator(operator2)) { return true; @@ -67,15 +62,19 @@ bool dynamic_vino_lib::BaseFilter::isPriorTo( return false; } -std::string dynamic_vino_lib::BaseFilter::boolToStr(bool value) +std::string openvino_wrapper_lib::BaseFilter::boolToStr(bool value) { - if (value) {return "true";} + if (value) { + return "true"; + } return "false"; } -bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) +bool openvino_wrapper_lib::BaseFilter::strToBool(const std::string& value) { - if (!value.compare("true")) {return true;} else if (!value.compare("false")) { + if (!value.compare("true")) { + return true; + } else if (!value.compare("false")) { return false; } else { slog::err << "Invalid string: " << value << " for bool 
conversion!" << slog::endl; @@ -83,14 +82,13 @@ bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) return false; } -const std::vector & -dynamic_vino_lib::BaseFilter::getSuffixConditions() const +const std::vector& openvino_wrapper_lib::BaseFilter::getSuffixConditions() const { return suffix_conditons_; } -bool dynamic_vino_lib::BaseFilter::logicOperation( - const std::string & logic1, const std::string & op, const std::string & logic2) +bool openvino_wrapper_lib::BaseFilter::logicOperation(const std::string& logic1, const std::string& op, + const std::string& logic2) { if (!op.compare("&&")) { return strToBool(logic1) && strToBool(logic2); @@ -102,8 +100,8 @@ bool dynamic_vino_lib::BaseFilter::logicOperation( } } -bool dynamic_vino_lib::BaseFilter::stringCompare( - const std::string & candidate, const std::string & op, const std::string & target) +bool openvino_wrapper_lib::BaseFilter::stringCompare(const std::string& candidate, const std::string& op, + const std::string& target) { if (!op.compare("==")) { return !target.compare(candidate); @@ -115,8 +113,7 @@ bool dynamic_vino_lib::BaseFilter::stringCompare( } } -bool dynamic_vino_lib::BaseFilter::floatCompare( - float candidate, const std::string & op, float target) +bool openvino_wrapper_lib::BaseFilter::floatCompare(float candidate, const std::string& op, float target) { if (!op.compare("<=")) { return candidate <= target; @@ -132,7 +129,7 @@ bool dynamic_vino_lib::BaseFilter::floatCompare( } } -float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) +float openvino_wrapper_lib::BaseFilter::stringToFloat(const std::string& candidate) { float result = 0; try { @@ -143,8 +140,7 @@ float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) return result; } -std::vector dynamic_vino_lib::BaseFilter::split( - const std::string & filter_conditions) +std::vector openvino_wrapper_lib::BaseFilter::split(const std::string& filter_conditions) { std::vector seperators; seperators.insert(seperators.end(), relation_operators_.begin(), relation_operators_.end()); @@ -174,8 +170,7 @@ std::vector dynamic_vino_lib::BaseFilter::split( return infix_conditions; } -void dynamic_vino_lib::BaseFilter::infixToSuffix( - std::vector & infix_conditions) +void openvino_wrapper_lib::BaseFilter::infixToSuffix(std::vector& infix_conditions) { std::stack operator_stack; for (auto elem : infix_conditions) { @@ -206,7 +201,7 @@ void dynamic_vino_lib::BaseFilter::infixToSuffix( } } -std::string dynamic_vino_lib::BaseFilter::strip(const std::string & str) +std::string openvino_wrapper_lib::BaseFilter::strip(const std::string& str) { std::string stripped_string = ""; for (auto character : str) { diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/openvino_wrapper_lib/src/inferences/base_inference.cpp similarity index 61% rename from dynamic_vino_lib/src/inferences/base_inference.cpp rename to openvino_wrapper_lib/src/inferences/base_inference.cpp index b17a8c21..b2fca8b1 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/openvino_wrapper_lib/src/inferences/base_inference.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,28 +19,28 @@ #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" - // Result -dynamic_vino_lib::Result::Result(const cv::Rect & location) +// Result +openvino_wrapper_lib::Result::Result(const cv::Rect& location) { location_ = location; } // BaseInference -dynamic_vino_lib::BaseInference::BaseInference() = default; +openvino_wrapper_lib::BaseInference::BaseInference() = default; -dynamic_vino_lib::BaseInference::~BaseInference() = default; +openvino_wrapper_lib::BaseInference::~BaseInference() = default; -void dynamic_vino_lib::BaseInference::loadEngine(const std::shared_ptr engine) +void openvino_wrapper_lib::BaseInference::loadEngine(const std::shared_ptr engine) { engine_ = engine; } -bool dynamic_vino_lib::BaseInference::submitRequest() +bool openvino_wrapper_lib::BaseInference::submitRequest() { - if (engine_->getRequest() == nullptr) { + if (!engine_->getRequest()) { return false; } if (!enqueued_frames_) { @@ -48,14 +48,14 @@ bool dynamic_vino_lib::BaseInference::submitRequest() } enqueued_frames_ = 0; results_fetched_ = false; - engine_->getRequest()->StartAsync(); + engine_->getRequest().start_async(); slog::debug << "Async Inference started!" << slog::endl; return true; } -bool dynamic_vino_lib::BaseInference::SynchronousRequest() +bool openvino_wrapper_lib::BaseInference::SynchronousRequest() { - if (engine_->getRequest() == nullptr) { + if (!engine_->getRequest()) { return false; } if (!enqueued_frames_) { @@ -63,11 +63,11 @@ bool dynamic_vino_lib::BaseInference::SynchronousRequest() } enqueued_frames_ = 0; results_fetched_ = false; - engine_->getRequest()->Infer(); + engine_->getRequest().infer(); return true; } -bool dynamic_vino_lib::BaseInference::fetchResults() +bool openvino_wrapper_lib::BaseInference::fetchResults() { if (results_fetched_) { return false; @@ -76,7 +76,7 @@ bool dynamic_vino_lib::BaseInference::fetchResults() return true; } -void dynamic_vino_lib::BaseInference::addCandidatedModel(std::shared_ptr model) +void openvino_wrapper_lib::BaseInference::addCandidatedModel(std::shared_ptr model) { slog::info << "TESTING in addCandidatedModel()" << slog::endl; if (model != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/base_reidentification.cpp b/openvino_wrapper_lib/src/inferences/base_reidentification.cpp similarity index 73% rename from dynamic_vino_lib/src/inferences/base_reidentification.cpp rename to openvino_wrapper_lib/src/inferences/base_reidentification.cpp index d9ede0bd..0d0d7c3a 100644 --- a/dynamic_vino_lib/src/inferences/base_reidentification.cpp +++ b/openvino_wrapper_lib/src/inferences/base_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
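// ---------------------------------------------------------------------------
// The submitRequest()/SynchronousRequest() changes above reduce to the plain
// OpenVINO 2.0 request API. A minimal sketch of the lifecycle the wrapper
// automates; wait() is standard ov::InferRequest API, though this patch keeps
// completion handling inside fetchResults():

#include <openvino/openvino.hpp>

inline void infer_once_sketch(ov::InferRequest & request)
{
  request.start_async();  // what submitRequest() now issues
  request.wait();         // block until the async run completes
  // ...then read output tensors, as each concrete fetchResults() does.
  // request.infer() is the one-call synchronous equivalent used by
  // SynchronousRequest().
}
// ---------------------------------------------------------------------------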
@@ -23,17 +23,16 @@ #include #include #include -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Tracker -dynamic_vino_lib::Tracker::Tracker( - int max_record_size, double same_track_thresh, double new_track_thresh) -: max_record_size_(max_record_size), - same_track_thresh_(same_track_thresh), - new_track_thresh_(new_track_thresh) {} +openvino_wrapper_lib::Tracker::Tracker(int max_record_size, double same_track_thresh, double new_track_thresh) + : max_record_size_(max_record_size), same_track_thresh_(same_track_thresh), new_track_thresh_(new_track_thresh) +{ +} -int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & feature) +int openvino_wrapper_lib::Tracker::processNewTrack(const std::vector& feature) { int most_similar_id; double similarity = findMostSimilarTrack(feature, most_similar_id); @@ -45,8 +44,7 @@ int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & featur return most_similar_id; } -double dynamic_vino_lib::Tracker::findMostSimilarTrack( - const std::vector & feature, int & most_similar_id) +double openvino_wrapper_lib::Tracker::findMostSimilarTrack(const std::vector& feature, int& most_similar_id) { double max_similarity = 0; most_similar_id = -1; @@ -60,13 +58,13 @@ double dynamic_vino_lib::Tracker::findMostSimilarTrack( return max_similarity; } -double dynamic_vino_lib::Tracker::calcSimilarity( - const std::vector & feature_a, const std::vector & feature_b) +double openvino_wrapper_lib::Tracker::calcSimilarity(const std::vector& feature_a, + const std::vector& feature_b) { if (feature_a.size() != feature_b.size()) { - slog::err << "cosine similarity can't be called for vectors of different lengths: " << - "feature_a size = " << std::to_string(feature_a.size()) << - "feature_b size = " << std::to_string(feature_b.size()) << slog::endl; + slog::err << "cosine similarity can't be called for vectors of different lengths: " + << "feature_a size = " << std::to_string(feature_a.size()) + << "feature_b size = " << std::to_string(feature_b.size()) << slog::endl; } float mul_sum, denom_a, denom_b, value_a, value_b; mul_sum = denom_a = denom_b = value_a = value_b = 0; @@ -79,13 +77,13 @@ double dynamic_vino_lib::Tracker::calcSimilarity( } if (denom_a == 0 || denom_b == 0) { slog::err << "cosine similarity is not defined whenever one or both " - "input vectors are zero-vectors." << slog::endl; + "input vectors are zero-vectors." 
+ << slog::endl; } return mul_sum / (sqrt(denom_a) * sqrt(denom_b)); } -void dynamic_vino_lib::Tracker::updateMatchTrack( - int track_id, const std::vector & feature) +void openvino_wrapper_lib::Tracker::updateMatchTrack(int track_id, const std::vector& feature) { if (recorded_tracks_.find(track_id) != recorded_tracks_.end()) { recorded_tracks_[track_id].feature.assign(feature.begin(), feature.end()); @@ -95,7 +93,7 @@ void dynamic_vino_lib::Tracker::updateMatchTrack( } } -void dynamic_vino_lib::Tracker::removeEarlestTrack() +void openvino_wrapper_lib::Tracker::removeEarlestTrack() { std::lock_guard lk(tracks_mtx_); int64_t earlest_time = LONG_MAX; @@ -109,8 +107,7 @@ void dynamic_vino_lib::Tracker::removeEarlestTrack() recorded_tracks_.erase(remove_iter); } - -int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) +int openvino_wrapper_lib::Tracker::addNewTrack(const std::vector& feature) { if (recorded_tracks_.size() >= max_record_size_) { std::thread remove_thread(std::bind(&Tracker::removeEarlestTrack, this)); @@ -125,14 +122,13 @@ int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) return max_track_id_; } -int64_t dynamic_vino_lib::Tracker::getCurrentTime() +int64_t openvino_wrapper_lib::Tracker::getCurrentTime() { - auto tp = std::chrono::time_point_cast( - std::chrono::system_clock::now()); + auto tp = std::chrono::time_point_cast(std::chrono::system_clock::now()); return static_cast(tp.time_since_epoch().count()); } -bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) +bool openvino_wrapper_lib::Tracker::saveTracksToFile(std::string filepath) { std::ofstream outfile(filepath); if (!outfile.is_open()) { @@ -140,8 +136,7 @@ bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) return false; } for (auto record : recorded_tracks_) { - outfile << record.first << " " << - record.second.lastest_update_time << " "; + outfile << record.first << " " << record.second.lastest_update_time << " "; for (auto elem : record.second.feature) { outfile << elem << " "; } @@ -152,7 +147,7 @@ bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) return true; } -bool dynamic_vino_lib::Tracker::loadTracksFromFile(std::string filepath) +bool openvino_wrapper_lib::Tracker::loadTracksFromFile(std::string filepath) { std::ifstream infile(filepath); if (!infile.is_open()) { diff --git a/openvino_wrapper_lib/src/inferences/emotions_detection.cpp b/openvino_wrapper_lib/src/inferences/emotions_detection.cpp new file mode 100644 index 00000000..f43aa6b2 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/emotions_detection.cpp @@ -0,0 +1,138 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
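Tracker::calcSimilarity above implements plain cosine similarity, sum(a*b) / (||a|| * ||b||). The diff strips template arguments, so the feature element type is assumed to be float here; a standalone sketch of the same computation, including the zero-vector case the slog::err message warns about:

    #include <cmath>
    #include <vector>

    // Standalone version of the cosine similarity computed by
    // Tracker::calcSimilarity above.
    double cosineSimilarity(const std::vector<float>& a, const std::vector<float>& b)
    {
      if (a.size() != b.size() || a.empty()) {
        return 0.0;  // mismatched lengths: similarity is not defined
      }
      double mul_sum = 0.0, denom_a = 0.0, denom_b = 0.0;
      for (std::size_t i = 0; i < a.size(); ++i) {
        mul_sum += a[i] * b[i];
        denom_a += a[i] * a[i];
        denom_b += b[i] * b[i];
      }
      if (denom_a == 0.0 || denom_b == 0.0) {
        return 0.0;  // zero-vector: undefined, as the error log above notes
      }
      return mul_sum / (std::sqrt(denom_a) * std::sqrt(denom_b));
    }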
+ +/** + * @brief a header file with declaration of EmotionsDetection class and + * EmotionsResult class + * @file emotions_recognition.cpp + */ + +#include +#include +#include +#include +#include "openvino_wrapper_lib/inferences/emotions_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// EmotionsResult +openvino_wrapper_lib::EmotionsResult::EmotionsResult(const cv::Rect& location) : Result(location) +{ +} + +// Emotions Detection +openvino_wrapper_lib::EmotionsDetection::EmotionsDetection() : openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::EmotionsDetection::~EmotionsDetection() = default; + +void openvino_wrapper_lib::EmotionsDetection::loadNetwork(const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::EmotionsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + bool succeed = openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); + if (!succeed) { + slog::err << "Failed enqueue Emotion frame." << slog::endl; + // TODO(weizhi): throw an error here + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::EmotionsDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::EmotionsDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + int label_length = static_cast(valid_model_->getLabels().size()); + std::string output_name = valid_model_->getOutputName(); + ov::Tensor emotions_tensor = getEngine()->getRequest().get_tensor(output_name); + /** emotions vector must have the same size as number of channels + in model output. 
Default output format is NCHW so we check index 1 */ + + ov::Shape shape = emotions_tensor.get_shape(); + int64 num_of_channels = shape[1]; + if (num_of_channels != label_length) { + slog::err << "Output size (" << num_of_channels << ") of the Emotions Recognition network is not equal " + << "to used emotions vector size (" << label_length << ")" << slog::endl; + throw std::logic_error("Output size (" + std::to_string(num_of_channels) + + ") of the Emotions Recognition network is not equal " + "to used emotions vector size (" + + std::to_string(label_length) + ")"); + } + + /** we identify an index of the most probable emotion in output array + for idx image to return appropriate emotion name */ + auto emotions_values = emotions_tensor.data(); + for (int idx = 0; idx < results_.size(); ++idx) { + auto output_idx_pos = emotions_values + label_length * idx; + int64 max_prob_emotion_idx = std::max_element(output_idx_pos, output_idx_pos + label_length) - output_idx_pos; + results_[idx].label_ = valid_model_->getLabels()[max_prob_emotion_idx]; + } + + return true; +} + +int openvino_wrapper_lib::EmotionsDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::EmotionsDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::EmotionsDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::EmotionsDetection::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::EmotionsDetection::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Emotion detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/openvino_wrapper_lib/src/inferences/face_detection.cpp similarity index 61% rename from dynamic_vino_lib/src/inferences/face_detection.cpp rename to openvino_wrapper_lib/src/inferences/face_detection.cpp index 0e6bb1d7..cab124dc 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/face_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
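EmotionsDetection::fetchResults above first checks that shape[1] of the output tensor matches the label count (index 1 is the channel dimension in NCHW) and then takes a per-image argmax with std::max_element. The same lookup in isolation, assuming a float output tensor laid out as [N, num_labels]:

    #include <algorithm>
    #include <cstddef>
    #include <openvino/openvino.hpp>

    // Index of the most probable emotion for image `idx`, mirroring the
    // max_element scan in fetchResults() above.
    std::size_t argmaxEmotion(const ov::Tensor& emotions, std::size_t idx)
    {
      const std::size_t num_labels = emotions.get_shape()[1];
      const float* values = emotions.data<float>() + idx * num_labels;
      return static_cast<std::size_t>(
          std::max_element(values, values + num_labels) - values);
    }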
@@ -22,20 +22,18 @@ #include #include -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // FaceDetectionResult -dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) -: ObjectDetectionResult(location) +openvino_wrapper_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect& location) + : ObjectDetectionResult(location) { } // FaceDetection -dynamic_vino_lib::FaceDetection::FaceDetection( - bool enable_roi_constraint, - double show_output_thresh) -: ObjectDetection(enable_roi_constraint, show_output_thresh) +openvino_wrapper_lib::FaceDetection::FaceDetection(bool enable_roi_constraint, double show_output_thresh) + : ObjectDetection(enable_roi_constraint, show_output_thresh) { } diff --git a/openvino_wrapper_lib/src/inferences/face_reidentification.cpp b/openvino_wrapper_lib/src/inferences/face_reidentification.cpp new file mode 100644 index 00000000..c308d3d9 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/face_reidentification.cpp @@ -0,0 +1,125 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
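Unlike the files already migrated, FaceReidentification::fetchResults below still reads its results through the InferenceEngine 1.0 GetBlob()/buffer() pattern. For comparison, a sketch of the OpenVINO 2.0 equivalent of slicing one embedding row out of an [N, len] output tensor; the float element type and the [N, len] layout are assumptions taken from the surrounding code, not confirmed by the diff:

    #include <string>
    #include <vector>
    #include <openvino/openvino.hpp>

    // 2.0-style replacement for GetBlob(output)->buffer(): copy the i-th
    // embedding row out of the named output tensor.
    std::vector<float> readEmbedding(ov::InferRequest& request,
                                     const std::string& output_name, std::size_t i)
    {
      ov::Tensor out = request.get_tensor(output_name);
      const std::size_t len = out.get_shape()[1];  // feature length per face
      const float* values = out.data<float>();
      return std::vector<float>(values + len * i, values + len * (i + 1));
    }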
+ +/** + * @brief a header file with declaration of FaceReidentification class and + * FaceReidentificationResult class + * @file face_reidentification.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/inferences/face_reidentification.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// FaceReidentificationResult +openvino_wrapper_lib::FaceReidentificationResult::FaceReidentificationResult(const cv::Rect& location) + : Result(location) +{ +} + +// FaceReidentification +openvino_wrapper_lib::FaceReidentification::FaceReidentification(double match_thresh) + : openvino_wrapper_lib::BaseInference() +{ + face_tracker_ = std::make_shared(1000, match_thresh, 0.3); +} + +openvino_wrapper_lib::FaceReidentification::~FaceReidentification() = default; +void openvino_wrapper_lib::FaceReidentification::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::FaceReidentification::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::FaceReidentification::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::FaceReidentification::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float* output_values = request->GetBlob(output)->buffer().as(); + int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; + for (int i = 0; i < getResultsLength(); i++) { + std::vector new_face = + std::vector(output_values + result_length * i, output_values + result_length * (i + 1)); + std::string face_id = "No." + std::to_string(face_tracker_->processNewTrack(new_face)); + results_[i].face_id_ = face_id; + found_result = true; + } + if (!found_result) { + results_.clear(); + } + return true; +} + +int openvino_wrapper_lib::FaceReidentification::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::FaceReidentification::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::FaceReidentification::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::FaceReidentification::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::FaceReidentification::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Face reidentification does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/openvino_wrapper_lib/src/inferences/head_pose_detection.cpp b/openvino_wrapper_lib/src/inferences/head_pose_detection.cpp new file mode 100644 index 00000000..4ccde0c9 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/head_pose_detection.cpp @@ -0,0 +1,118 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of HeadPoseDetection class and + * HeadPoseResult class + * @file head_pose_recognition.cpp + */ + +#include +#include +#include +#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" + +// HeadPoseResult +openvino_wrapper_lib::HeadPoseResult::HeadPoseResult(const cv::Rect& location) : Result(location) +{ +} + +// Head Pose Detection +openvino_wrapper_lib::HeadPoseDetection::HeadPoseDetection() : openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::HeadPoseDetection::~HeadPoseDetection() = default; + +void openvino_wrapper_lib::HeadPoseDetection::loadNetwork(std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::HeadPoseDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + bool succeed = openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); + if (!succeed) { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::HeadPoseDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::HeadPoseDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + auto request = getEngine()->getRequest(); + ov::Tensor angle_r = request.get_tensor(valid_model_->getOutputOutputAngleR()); + ov::Tensor angle_p = request.get_tensor(valid_model_->getOutputOutputAngleP()); + ov::Tensor angle_y = request.get_tensor(valid_model_->getOutputOutputAngleY()); + + for (int i = 0; i < getResultsLength(); ++i) { + results_[i].angle_r_ = angle_r.data()[i]; + results_[i].angle_p_ = angle_p.data()[i]; + results_[i].angle_y_ = angle_y.data()[i]; + } + return true; +} + +int openvino_wrapper_lib::HeadPoseDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::HeadPoseDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::HeadPoseDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void 
openvino_wrapper_lib::HeadPoseDetection::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::HeadPoseDetection::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Headpose detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/openvino_wrapper_lib/src/inferences/landmarks_detection.cpp similarity index 51% rename from dynamic_vino_lib/src/inferences/landmarks_detection.cpp rename to openvino_wrapper_lib/src/inferences/landmarks_detection.cpp index 5ab122f9..ff1e4c12 100644 --- a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/landmarks_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,36 +20,35 @@ #include #include #include -#include "dynamic_vino_lib/inferences/landmarks_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/landmarks_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // LandmarksDetectionResult -dynamic_vino_lib::LandmarksDetectionResult::LandmarksDetectionResult( - const cv::Rect & location) -: Result(location) {} +openvino_wrapper_lib::LandmarksDetectionResult::LandmarksDetectionResult(const cv::Rect& location) : Result(location) +{ +} // LandmarksDetection -dynamic_vino_lib::LandmarksDetection::LandmarksDetection() -: dynamic_vino_lib::BaseInference() {} +openvino_wrapper_lib::LandmarksDetection::LandmarksDetection() : openvino_wrapper_lib::BaseInference() +{ +} -dynamic_vino_lib::LandmarksDetection::~LandmarksDetection() = default; -void dynamic_vino_lib::LandmarksDetection::loadNetwork( - const std::shared_ptr network) +openvino_wrapper_lib::LandmarksDetection::~LandmarksDetection() = default; +void openvino_wrapper_lib::LandmarksDetection::loadNetwork( + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::LandmarksDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool openvino_wrapper_lib::LandmarksDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { return false; } Result r(input_frame_loc); @@ -57,23 +56,25 @@ bool dynamic_vino_lib::LandmarksDetection::enqueue( return true; } -bool dynamic_vino_lib::LandmarksDetection::submitRequest() +bool openvino_wrapper_lib::LandmarksDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::LandmarksDetection::fetchResults() +bool 
openvino_wrapper_lib::LandmarksDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); + const float* output_values = request->GetBlob(output)->buffer().as(); int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; for (int i = 0; i < getResultsLength(); i++) { - std::vector coordinates = std::vector( - output_values + result_length * i, output_values + result_length * (i + 1)); + std::vector coordinates = + std::vector(output_values + result_length * i, output_values + result_length * (i + 1)); for (int j = 0; j < result_length; j += 2) { cv::Rect rect = results_[i].getLocation(); int col = static_cast(coordinates[j] * rect.width); @@ -83,40 +84,40 @@ bool dynamic_vino_lib::LandmarksDetection::fetchResults() } found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) { + results_.clear(); + } return true; } -int dynamic_vino_lib::LandmarksDetection::getResultsLength() const +int openvino_wrapper_lib::LandmarksDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result* openvino_wrapper_lib::LandmarksDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::LandmarksDetection::getName() const +const std::string openvino_wrapper_lib::LandmarksDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::LandmarksDetection::observeOutput( - const std::shared_ptr & output) +void openvino_wrapper_lib::LandmarksDetection::observeOutput(const std::shared_ptr& output) { if (output != nullptr) { output->accept(results_); } } -const std::vector dynamic_vino_lib::LandmarksDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +openvino_wrapper_lib::LandmarksDetection::getFilteredROIs(const std::string filter_conditions) const { if (!filter_conditions.empty()) { - slog::err << "Landmarks detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + slog::err << "Landmarks detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; for (auto res : results_) { diff --git a/openvino_wrapper_lib/src/inferences/license_plate_detection.cpp b/openvino_wrapper_lib/src/inferences/license_plate_detection.cpp new file mode 100644 index 00000000..b4b2f04e --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/license_plate_detection.cpp @@ -0,0 +1,138 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a realization file with declaration of LicensePlateDetection class and + * LicensePlateDetectionResult class + * @file license_plate_detection.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// LicensePlateDetectionResult +openvino_wrapper_lib::LicensePlateDetectionResult::LicensePlateDetectionResult(const cv::Rect& location) + : Result(location) +{ +} + +// LicensePlateDetection +openvino_wrapper_lib::LicensePlateDetection::LicensePlateDetection() : openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::LicensePlateDetection::~LicensePlateDetection() = default; +void openvino_wrapper_lib::LicensePlateDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +void openvino_wrapper_lib::LicensePlateDetection::fillSeqBlob() +{ + ov::Tensor seq_tensor = getEngine()->getRequest().get_tensor(valid_model_->getSeqInputName()); + int max_sequence_size = seq_tensor.get_shape()[0]; + // second input is sequence, which is some relic from the training + // it should have the leading 0.0f and rest 1.0f + float* tensor_data = seq_tensor.data(); + std::fill(tensor_data, tensor_data + max_sequence_size, 1.0f); +} + +bool openvino_wrapper_lib::LicensePlateDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + fillSeqBlob(); + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::LicensePlateDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::LicensePlateDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + ov::InferRequest request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float* output_values = request.get_tensor(output).data(); + for (int i = 0; i < getResultsLength(); i++) { + std::string license = ""; + int max_size = valid_model_->getMaxSequenceSize(); + for (int j = 0; j < max_size; j++) { + if (output_values[i * max_size + j] == -1) { + break; + } + license += licenses_[output_values[i * max_size + j]]; + } + results_[i].license_ = license; + found_result = true; + } + if (!found_result) { + results_.clear(); + } + return true; +} + +int openvino_wrapper_lib::LicensePlateDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::LicensePlateDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::LicensePlateDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::LicensePlateDetection::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::LicensePlateDetection::getFilteredROIs(const std::string filter_conditions) const +{ + if 
(!filter_conditions.empty()) { + slog::err << "License plate detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/openvino_wrapper_lib/src/inferences/object_detection.cpp similarity index 50% rename from dynamic_vino_lib/src/inferences/object_detection.cpp rename to openvino_wrapper_lib/src/inferences/object_detection.cpp index 36f30f89..8a79c521 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/object_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,47 +22,42 @@ #include #include #include -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // ObjectDetectionResult -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) -: Result(location) +openvino_wrapper_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect& location) : Result(location) { } // ObjectDetection -dynamic_vino_lib::ObjectDetection::ObjectDetection( - bool enable_roi_constraint, - double show_output_thresh) -: show_output_thresh_(show_output_thresh), - enable_roi_constraint_(enable_roi_constraint), dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::ObjectDetection::ObjectDetection(bool enable_roi_constraint, double show_output_thresh) + : show_output_thresh_(show_output_thresh) + , enable_roi_constraint_(enable_roi_constraint) + , openvino_wrapper_lib::BaseInference() { result_filter_ = std::make_shared(); result_filter_->init(); } -dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; +openvino_wrapper_lib::ObjectDetection::~ObjectDetection() = default; -void dynamic_vino_lib::ObjectDetection::loadNetwork( - std::shared_ptr network) +void openvino_wrapper_lib::ObjectDetection::loadNetwork(std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::ObjectDetection::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool openvino_wrapper_lib::ObjectDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { if (valid_model_ == nullptr || getEngine() == nullptr) { return false; } if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { - slog::warn << "Number of " << getName() << "input more than maximum(" << - max_batch_size_ << ") processed by inference" << slog::endl; + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; return false; } @@ -70,53 +65,48 @@ bool dynamic_vino_lib::ObjectDetection::enqueue( return false; } - // nonsense!! 
- // Result r(input_frame_loc); - // results_.clear(); - // results_.emplace_back(r); enqueued_frames_ += 1; return true; } -bool dynamic_vino_lib::ObjectDetection::fetchResults() +bool openvino_wrapper_lib::ObjectDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } results_.clear(); - return (valid_model_ != nullptr) && valid_model_->fetchResults( - getEngine(), results_, show_output_thresh_, enable_roi_constraint_); + return (valid_model_ != nullptr) && + valid_model_->fetchResults(getEngine(), results_, show_output_thresh_, enable_roi_constraint_); } -int dynamic_vino_lib::ObjectDetection::getResultsLength() const +int openvino_wrapper_lib::ObjectDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::ObjectDetection::Result * -dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::ObjectDetection::Result* +openvino_wrapper_lib::ObjectDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::ObjectDetection::getName() const +const std::string openvino_wrapper_lib::ObjectDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::ObjectDetection::observeOutput( - const std::shared_ptr & output) +void openvino_wrapper_lib::ObjectDetection::observeOutput(const std::shared_ptr& output) { if (output != nullptr) { output->accept(results_); } } -const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +openvino_wrapper_lib::ObjectDetection::getFilteredROIs(const std::string filter_conditions) const { if (!result_filter_->isValidFilterConditions(filter_conditions)) { std::vector filtered_rois; @@ -130,24 +120,23 @@ const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( return result_filter_->getFilteredLocations(); } - // ObjectDetectionResultFilter -dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} +openvino_wrapper_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() +{ +} -void dynamic_vino_lib::ObjectDetectionResultFilter::init() +void openvino_wrapper_lib::ObjectDetectionResultFilter::init() { key_to_function_.insert(std::make_pair("label", isValidLabel)); key_to_function_.insert(std::make_pair("confidence", isValidConfidence)); } -void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults( - const std::vector & results) +void openvino_wrapper_lib::ObjectDetectionResultFilter::acceptResults(const std::vector& results) { results_ = results; } -std::vector -dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() +std::vector openvino_wrapper_lib::ObjectDetectionResultFilter::getFilteredLocations() { std::vector locations; for (auto result : results_) { @@ -158,27 +147,24 @@ dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() return locations; } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel( - const Result & result, const std::string & op, const std::string & target) +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidLabel(const Result& result, const std::string& op, + const std::string& target) { return stringCompare(result.getLabel(), op, target); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidConfidence( - const Result & result, const std::string & op, 
const std::string & target) +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidConfidence(const Result& result, const std::string& op, + const std::string& target) { return floatCompare(result.getConfidence(), op, stringToFloat(target)); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult( - const Result & result) +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidResult(const Result& result) { ISVALIDRESULT(key_to_function_, result); } -double dynamic_vino_lib::ObjectDetection::calcIoU( - const cv::Rect & box_1, - const cv::Rect & box_2) +double openvino_wrapper_lib::ObjectDetection::calcIoU(const cv::Rect& box_1, const cv::Rect& box_2) { cv::Rect i = box_1 & box_2; cv::Rect u = box_1 | box_2; diff --git a/openvino_wrapper_lib/src/inferences/object_segmentation.cpp b/openvino_wrapper_lib/src/inferences/object_segmentation.cpp new file mode 100644 index 00000000..fa035a6c --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/object_segmentation.cpp @@ -0,0 +1,217 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of ObjectSegmentation class and + * ObjectSegmentationResult class + * @file object_segmentation.cpp + */ +#include +#include +#include +#include +#include + +#include +#include "openvino_wrapper_lib/inferences/object_segmentation.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// ObjectSegmentationResult +openvino_wrapper_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect& location) : Result(location) +{ +} + +// ObjectSegmentation +openvino_wrapper_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) + : show_output_thresh_(show_output_thresh), openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::ObjectSegmentation::~ObjectSegmentation() = default; + +void openvino_wrapper_lib::ObjectSegmentation::loadNetwork( + const std::shared_ptr network) +{ + slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +/** + * Deprecated! 
+ * This function only support OpenVINO version <=2018R5 + */ +bool openvino_wrapper_lib::ObjectSegmentation::enqueue_for_one_input(const cv::Mat& frame, + const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + Result r(input_frame_loc); + results_.clear(); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentation::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (valid_model_ == nullptr || getEngine() == nullptr) { + throw std::logic_error("Model or Engine is not set correctly!"); + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + return false; + } + + enqueued_frames_ += 1; + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentation::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::ObjectSegmentation::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + results_.clear(); + ov::InferRequest infer_request = getEngine()->getRequest(); + slog::debug << "Analyzing Detection results..." << slog::endl; + std::string detection_output = valid_model_->getOutputName("detection"); + + ov::Tensor output_tensor = infer_request.get_tensor(detection_output); + const auto out_data = output_tensor.data(); + ov::Shape out_shape = output_tensor.get_shape(); + size_t output_w, output_h, output_des, output_extra = 0; + if (out_shape.size() == 3) { + output_w = out_shape[2]; + output_h = out_shape[1]; + output_des = out_shape[0]; + } else if (out_shape.size() == 4) { + output_w = out_shape[3]; + output_h = out_shape[2]; + output_des = out_shape[1]; + output_extra = out_shape[0]; + } else { + slog::warn << "unexpected output shape: " << out_shape << slog::endl; + return false; + } + + slog::debug << "output w " << output_w << slog::endl; + slog::debug << "output h " << output_h << slog::endl; + slog::debug << "output description " << output_des << slog::endl; + slog::debug << "output extra " << output_extra << slog::endl; + + const float* detections = output_tensor.data(); + std::vector& labels = valid_model_->getLabels(); + slog::debug << "label size " << labels.size() << slog::endl; + + cv::Mat inImg, resImg, maskImg(output_h, output_w, CV_8UC3); + cv::Mat colored_mask(output_h, output_w, CV_8UC3); + cv::Rect roi = cv::Rect(0, 0, output_w, output_h); + + for (int rowId = 0; rowId < output_h; ++rowId) { + for (int colId = 0; colId < output_w; ++colId) { + std::size_t classId = 0; + float maxProb = -1.0f; + if (output_des < 2) { // assume the output is already ArgMax'ed + classId = static_cast(detections[rowId * output_w + colId]); + for (int ch = 0; ch < colored_mask.channels(); ++ch) { + colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + } + } else { + for (int chId = 0; chId < output_des; ++chId) { + float prob = detections[chId * output_h * output_w + rowId * output_w + colId]; + if (prob > maxProb) { + classId = 
chId; + maxProb = prob; + } + } + while (classId >= colors_.size()) { + static std::mt19937 rng(classId); + std::uniform_int_distribution distr(0, 255); + cv::Vec3b color(distr(rng), distr(rng), distr(rng)); + colors_.push_back(color); + } + if (maxProb > 0.5) { + for (int ch = 0; ch < colored_mask.channels(); ++ch) { + colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + } + } + } + } + } + const float alpha = 0.7f; + Result result(roi); + result.mask_ = colored_mask; + found_result = true; + results_.emplace_back(result); + return true; +} + +int openvino_wrapper_lib::ObjectSegmentation::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::ObjectSegmentation::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::ObjectSegmentation::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::ObjectSegmentation::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::ObjectSegmentation::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Object segmentation does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + return filtered_rois; +} diff --git a/openvino_wrapper_lib/src/inferences/object_segmentation_instance.cpp b/openvino_wrapper_lib/src/inferences/object_segmentation_instance.cpp new file mode 100644 index 00000000..c833ad98 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/object_segmentation_instance.cpp @@ -0,0 +1,121 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
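The segmentation loop above selects, for every pixel, the channel with the highest score in a CHW score map (falling back to the raw class id when the output is already ArgMax'ed). The per-pixel argmax in isolation:

    #include <cstddef>

    // Per-pixel argmax over a CHW score map, as done in
    // ObjectSegmentation::fetchResults() above. `scores` points at
    // channels * height * width floats.
    std::size_t argmaxClass(const float* scores, std::size_t channels,
                            std::size_t height, std::size_t width,
                            std::size_t row, std::size_t col)
    {
      std::size_t best = 0;
      float best_prob = -1.0f;
      for (std::size_t ch = 0; ch < channels; ++ch) {
        const float prob = scores[ch * height * width + row * width + col];
        if (prob > best_prob) {
          best_prob = prob;
          best = ch;
        }
      }
      return best;
    }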
+ +#include +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/inferences/object_segmentation_instance.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +openvino_wrapper_lib::ObjectSegmentationInstanceResult::ObjectSegmentationInstanceResult(const cv::Rect& location) + : Result(location) +{ +} + +openvino_wrapper_lib::ObjectSegmentationInstance::ObjectSegmentationInstance(double show_output_thresh) + : show_output_thresh_(show_output_thresh), openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::ObjectSegmentationInstance::~ObjectSegmentationInstance() = default; + +void openvino_wrapper_lib::ObjectSegmentationInstance::loadNetwork( + const std::shared_ptr network) +{ + slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::ObjectSegmentationInstance::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (valid_model_ == nullptr || getEngine() == nullptr) { + throw std::logic_error("Model or Engine is not set correctly!"); + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + return false; + } + + enqueued_frames_ += 1; + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentationInstance::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::ObjectSegmentationInstance::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + results_.clear(); + + return (valid_model_ != nullptr) && valid_model_->fetchResults(getEngine(), results_, show_output_thresh_); +} + +int openvino_wrapper_lib::ObjectSegmentationInstance::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::ObjectSegmentationInstance::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::ObjectSegmentationInstance::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::ObjectSegmentationInstance::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::ObjectSegmentationInstance::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Object segmentation does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp b/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp new file mode 100644 index 00000000..7e1dc28e --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp @@ -0,0 +1,222 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of ObjectSegmentation class and + * ObjectSegmentationResult class + * @file object_segmentation.cpp + */ +#include +#include +#include +#include +#include + +#include +#include "openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// ObjectSegmentationResult +openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult::ObjectSegmentationMaskrcnnResult(const cv::Rect& location) + : Result(location) +{ +} + +// ObjectSegmentation +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::ObjectSegmentationMaskrcnn(double show_output_thresh) + : show_output_thresh_(show_output_thresh), openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::~ObjectSegmentationMaskrcnn() = default; + +void openvino_wrapper_lib::ObjectSegmentationMaskrcnn::loadNetwork( + const std::shared_ptr network) +{ + slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +/** + * Deprecated! 
+ * This function only support OpenVINO version <=2018R5 + */ +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::enqueue_for_one_input(const cv::Mat& frame, + const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + Result r(input_frame_loc); + results_.clear(); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (valid_model_ == nullptr || getEngine() == nullptr) { + throw std::logic_error("Model or Engine is not set correctly!"); + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + return false; + } + + enqueued_frames_ += 1; + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + results_.clear(); + ov::InferRequest infer_request = getEngine()->getRequest(); + slog::debug << "Analyzing Detection results..." << slog::endl; + std::string detection_output = valid_model_->getOutputName("detection"); + std::string mask_output = valid_model_->getOutputName("masks"); + slog::debug << "Detection_output=" << detection_output << ", Mask_output=" << mask_output << slog::endl; + + // get detection data + ov::Tensor do_tensor = infer_request.get_tensor(detection_output.c_str()); + const auto do_data = do_tensor.data(); + ov::Shape do_shape = do_tensor.get_shape(); + slog::debug << "Detection Blob getDims = " << do_shape.size() << "[Should be 2]" << slog::endl; + // get mask data + ov::Tensor mask_tensor = infer_request.get_tensor(mask_output.c_str()); + const auto mask_data = mask_tensor.data(); + ov::Shape mask_shape = mask_tensor.get_shape(); + + // determine models + size_t box_description_size = do_shape.back(); + OPENVINO_ASSERT(mask_shape.size() == 4); + size_t box_num = mask_shape[0]; + size_t C = mask_shape[1]; + size_t H = mask_shape[2]; + size_t W = mask_shape[3]; + size_t box_stride = W * H * C; + slog::debug << "box_description is:" << box_description_size << slog::endl; + slog::debug << "box_num is:" << box_num << slog::endl; + slog::debug << "C is:" << C << slog::endl; + slog::debug << "H is:" << H << slog::endl; + slog::debug << "W is:" << W << slog::endl; + + for (size_t box = 0; box < box_num; ++box) { + // box description: batch, label, prob, x1, y1, x2, y2 + float* box_info = do_data + box * box_description_size; + auto batch = static_cast(box_info[0]); + slog::debug << "batch =" << batch << slog::endl; + if (batch < 0) { + slog::warn << "Batch size should be greater than 0. [batch=" << batch << "]." 
<< slog::endl; + break; + } + float prob = box_info[2]; + if (prob > show_output_thresh_) { + float x1 = std::min(std::max(0.0f, box_info[3] * width_), static_cast(width_)); + float y1 = std::min(std::max(0.0f, box_info[4] * height_), static_cast(height_)); + float x2 = std::min(std::max(0.0f, box_info[5] * width_), static_cast(width_)); + float y2 = std::min(std::max(0.0f, box_info[6] * height_), static_cast(height_)); + int box_width = static_cast(x2 - x1); + int box_height = static_cast(y2 - y1); + slog::debug << "Box[" << box_width << "x" << box_height << "]" << slog::endl; + if (box_width <= 0 || box_height <= 0) + break; + int class_id = static_cast(box_info[1] + 1e-6f); + float* mask_arr = mask_data + box_stride * box + H * W * (class_id - 1); + slog::info << "Detected class " << class_id << " with probability " << prob << " from batch " << batch << ": [" + << x1 << ", " << y1 << "], [" << x2 << ", " << y2 << "]" << slog::endl; + cv::Mat mask_mat(H, W, CV_32FC1, mask_arr); + cv::Rect roi = cv::Rect(static_cast(x1), static_cast(y1), box_width, box_height); + cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1); + cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height)); + Result result(roi); + result.confidence_ = prob; + std::vector& labels = valid_model_->getLabels(); + result.label_ = class_id < labels.size() ? labels[class_id] : std::string("label #") + std::to_string(class_id); + result.mask_ = resized_mask_mat; + found_result = true; + slog::debug << "adding one segmentation Box ..." << slog::endl; + results_.emplace_back(result); + } + } + if (!found_result) { + slog::debug << "No Segmentation Result Found!" << slog::endl; + results_.clear(); + } + return true; +} + +int openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::ObjectSegmentationMaskrcnn::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Object segmentation does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp b/openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp new file mode 100644 index 00000000..1a26952b --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp @@ -0,0 +1,143 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of PersonAttribsDetection class and + * PersonAttribsDetectionResult class + * @file person_attribs_detection.cpp + */ +#include +#include +#include +#include +#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// PersonAttribsDetectionResult +openvino_wrapper_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult(const cv::Rect& location) + : Result(location) +{ +} + +// PersonAttribsDetection +openvino_wrapper_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) + : attribs_confidence_(attribs_confidence), openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; +void openvino_wrapper_lib::PersonAttribsDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::PersonAttribsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::PersonAttribsDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::PersonAttribsDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + ov::InferRequest request = getEngine()->getRequest(); + slog::debug << "Analyzing Attributes Detection results..." << slog::endl; + std::string attribute_output = valid_model_->getOutputName("attributes_output_"); + std::string top_output = valid_model_->getOutputName("top_output_"); + std::string bottom_output = valid_model_->getOutputName("bottom_output_"); + + ov::Tensor attrib_tensor = request.get_tensor(attribute_output); + ov::Tensor top_tensor = request.get_tensor(top_output); + ov::Tensor bottom_tensor = request.get_tensor(bottom_output); + + auto attri_values = attrib_tensor.data(); + auto top_values = top_tensor.data(); + auto bottom_values = bottom_tensor.data(); + + int net_attrib_length = net_attributes_.size(); + for (int i = 0; i < getResultsLength(); i++) { + results_[i].male_probability_ = attri_values[i * net_attrib_length]; + results_[i].top_point_.x = top_values[i]; + results_[i].top_point_.y = top_values[i + 1]; + results_[i].bottom_point_.x = bottom_values[i]; + results_[i].bottom_point_.y = bottom_values[i + 1]; + std::string attrib = ""; + for (int j = 1; j < net_attrib_length; j++) { + attrib += (attri_values[i * net_attrib_length + j] > attribs_confidence_) ? 
+                    net_attributes_[j] + ", " : "";
+    }
+    results_[i].attributes_ = attrib;
+
+    found_result = true;
+  }
+  if (!found_result) {
+    results_.clear();
+  }
+  return true;
+}
+
+int openvino_wrapper_lib::PersonAttribsDetection::getResultsLength() const
+{
+  return static_cast<int>(results_.size());
+}
+
+const openvino_wrapper_lib::Result* openvino_wrapper_lib::PersonAttribsDetection::getLocationResult(int idx) const
+{
+  return &(results_[idx]);
+}
+
+const std::string openvino_wrapper_lib::PersonAttribsDetection::getName() const
+{
+  return valid_model_->getModelCategory();
+}
+
+void openvino_wrapper_lib::PersonAttribsDetection::observeOutput(const std::shared_ptr<Outputs::BaseOutput>& output)
+{
+  if (output != nullptr) {
+    output->accept(results_);
+  }
+}
+
+const std::vector<cv::Rect>
+openvino_wrapper_lib::PersonAttribsDetection::getFilteredROIs(const std::string filter_conditions) const
+{
+  if (!filter_conditions.empty()) {
+    slog::err << "Person attributes detection does not support filtering now! "
+              << "Filter conditions: " << filter_conditions << slog::endl;
+  }
+  std::vector<cv::Rect> filtered_rois;
+  for (auto res : results_) {
+    filtered_rois.push_back(res.getLocation());
+  }
+  return filtered_rois;
+}
diff --git a/openvino_wrapper_lib/src/inferences/person_reidentification.cpp b/openvino_wrapper_lib/src/inferences/person_reidentification.cpp
new file mode 100644
index 00000000..2c9d1ccb
--- /dev/null
+++ b/openvino_wrapper_lib/src/inferences/person_reidentification.cpp
@@ -0,0 +1,123 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
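+
+// [Editor's note] fetchResults() in this file reads one 256-float
+// re-identification descriptor per enqueued ROI and delegates the matching
+// to Tracker::processNewTrack(). As a hedged illustration only (this helper
+// is NOT part of the library), such trackers commonly compare descriptors
+// with cosine similarity:
+//
+//   #include <cmath>
+//   #include <vector>
+//   float cosine_similarity(const std::vector<float>& a, const std::vector<float>& b)
+//   {
+//     float dot = 0.f, na = 0.f, nb = 0.f;
+//     for (size_t i = 0; i < a.size(); ++i) {
+//       dot += a[i] * b[i];
+//       na += a[i] * a[i];
+//       nb += b[i] * b[i];
+//     }
+//     return dot / (std::sqrt(na) * std::sqrt(nb) + 1e-6f);  // guard against zero norms
+//   }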
+
+/**
+ * @brief a header file with declaration of PersonReidentification class and
+ * PersonReidentificationResult class
+ * @file person_reidentification.cpp
+ */
+#include <memory>
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/inferences/person_reidentification.hpp"
+#include "openvino_wrapper_lib/outputs/base_output.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+
+// PersonReidentificationResult
+openvino_wrapper_lib::PersonReidentificationResult::PersonReidentificationResult(const cv::Rect& location)
+  : Result(location)
+{
+}
+
+// PersonReidentification
+openvino_wrapper_lib::PersonReidentification::PersonReidentification(double match_thresh)
+  : openvino_wrapper_lib::BaseInference()
+{
+  person_tracker_ = std::make_shared<Tracker>(1000, match_thresh, 0.3);
+}
+
+openvino_wrapper_lib::PersonReidentification::~PersonReidentification() = default;
+void openvino_wrapper_lib::PersonReidentification::loadNetwork(
+    const std::shared_ptr<Models::PersonReidentificationModel> network)
+{
+  valid_model_ = network;
+  setMaxBatchSize(network->getMaxBatchSize());
+}
+
+bool openvino_wrapper_lib::PersonReidentification::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc)
+{
+  if (getEnqueuedNum() == 0) {
+    results_.clear();
+  }
+  if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0,
+                                                    valid_model_->getInputName())) {
+    return false;
+  }
+  Result r(input_frame_loc);
+  results_.emplace_back(r);
+  return true;
+}
+
+bool openvino_wrapper_lib::PersonReidentification::submitRequest()
+{
+  return openvino_wrapper_lib::BaseInference::submitRequest();
+}
+
+bool openvino_wrapper_lib::PersonReidentification::fetchResults()
+{
+  bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults();
+  if (!can_fetch) {
+    return false;
+  }
+  bool found_result = false;
+  ov::InferRequest request = getEngine()->getRequest();
+  std::string output = valid_model_->getOutputName();
+  const float* output_values = request.get_tensor(output).data<float>();
+  for (int i = 0; i < getResultsLength(); i++) {
+    std::vector<float> new_person = std::vector<float>(output_values + 256 * i, output_values + 256 * i + 256);
+    std::string person_id = "No." + std::to_string(person_tracker_->processNewTrack(new_person));
+    results_[i].person_id_ = person_id;
+    found_result = true;
+  }
+  if (!found_result) {
+    results_.clear();
+  }
+  return true;
+}
+
+int openvino_wrapper_lib::PersonReidentification::getResultsLength() const
+{
+  return static_cast<int>(results_.size());
+}
+
+const openvino_wrapper_lib::Result* openvino_wrapper_lib::PersonReidentification::getLocationResult(int idx) const
+{
+  return &(results_[idx]);
+}
+
+const std::string openvino_wrapper_lib::PersonReidentification::getName() const
+{
+  return valid_model_->getModelCategory();
+}
+
+void openvino_wrapper_lib::PersonReidentification::observeOutput(const std::shared_ptr<Outputs::BaseOutput>& output)
+{
+  if (output != nullptr) {
+    output->accept(results_);
+  }
+}
+
+const std::vector<cv::Rect>
+openvino_wrapper_lib::PersonReidentification::getFilteredROIs(const std::string filter_conditions) const
+{
+  if (!filter_conditions.empty()) {
+    slog::err << "Person reidentification does not support filtering now! "
" + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp b/openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp new file mode 100644 index 00000000..f8440e34 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp @@ -0,0 +1,128 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a realization file with declaration of VehicleAttribsDetection class and + * VehicleAttribsDetectionResult class + * @file vehicle_attribs_detection.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// VehicleAttribsDetectionResult +openvino_wrapper_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult(const cv::Rect& location) + : Result(location) +{ +} + +// VehicleAttribsDetection +openvino_wrapper_lib::VehicleAttribsDetection::VehicleAttribsDetection() : openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; +void openvino_wrapper_lib::VehicleAttribsDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool openvino_wrapper_lib::VehicleAttribsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::VehicleAttribsDetection::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::VehicleAttribsDetection::fetchResults() +{ + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + bool found_result = false; + + ov::InferRequest infer_request = getEngine()->getRequest(); + std::string color_name = valid_model_->getOutputName("color_output_"); + std::string type_name = valid_model_->getOutputName("type_output_"); + const float* color_values = infer_request.get_tensor(color_name).data(); + const float* type_values = infer_request.get_tensor(type_name).data(); + + for (int i = 0; i < getResultsLength(); i++) { + auto color_id = std::max_element(color_values, color_values + 7) - color_values; + auto type_id = std::max_element(type_values, type_values + 4) - type_values; + color_values += 7; + type_values += 4; + results_[i].color_ = colors_[color_id]; + results_[i].type_ = types_[type_id]; + found_result = true; + } + if 
(!found_result) { + results_.clear(); + } + return true; +} + +int openvino_wrapper_lib::VehicleAttribsDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result* openvino_wrapper_lib::VehicleAttribsDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::VehicleAttribsDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::VehicleAttribsDetection::observeOutput(const std::shared_ptr& output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector +openvino_wrapper_lib::VehicleAttribsDetection::getFilteredROIs(const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Vehicle attributes detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/openvino_wrapper_lib/src/inputs/image_input.cpp similarity index 78% rename from dynamic_vino_lib/src/inputs/image_input.cpp rename to openvino_wrapper_lib/src/inputs/image_input.cpp index 3a30d226..99743e86 100644 --- a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/openvino_wrapper_lib/src/inputs/image_input.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,10 +18,10 @@ */ #include -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/slog.hpp" -Input::Image::Image(const std::string & file) +Input::Image::Image(const std::string& file) { file_.assign(file); } @@ -39,7 +39,7 @@ bool Input::Image::initialize() return isInit(); } -bool Input::Image::read(cv::Mat * frame) +bool Input::Image::read(cv::Mat* frame) { if (!isInit()) { return false; @@ -49,12 +49,11 @@ bool Input::Image::read(cv::Mat * frame) return true; } -void Input::Image::config(const Input::Config & config) +void Input::Image::config(const Input::Config& config) { if (config.path != "") { file_.assign(config.path); initialize(); - slog::info << "Image Input device was reinitialized with new file:" << - config.path.c_str() << slog::endl; + slog::info << "Image Input device was reinitialized with new file:" << config.path.c_str() << slog::endl; } } diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/openvino_wrapper_lib/src/inputs/image_topic.cpp similarity index 64% rename from dynamic_vino_lib/src/inputs/image_topic.cpp rename to openvino_wrapper_lib/src/inputs/image_topic.cpp index 5d356289..0fe6a6ed 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/openvino_wrapper_lib/src/inputs/image_topic.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
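[Editor's note] The next hunk rebuilds the ImageTopic subscription with the message-type template argument restored, keeping the best-effort, keep-last-1 QoS. A minimal standalone sketch of the same subscription setup (illustrative only; the node and callback names are assumptions, not part of this patch):

    auto qos = rclcpp::QoS(rclcpp::KeepLast(1)).best_effort();
    auto sub = node->create_subscription<sensor_msgs::msg::Image>(
        "/openvino_toolkit/image_raw", qos,
        [](const sensor_msgs::msg::Image::SharedPtr msg) {
          cv::Mat frame = cv_bridge::toCvCopy(msg, "bgr8")->image;  // deep copy, BGR8
        });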
@@ -19,14 +19,12 @@
 
 #include <memory>
 #include <string>
-#include "dynamic_vino_lib/inputs/image_topic.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/inputs/image_topic.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 #define INPUT_TOPIC "/openvino_toolkit/image_raw"
 
-
-Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node)
-: node_(node)
+Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node) : node_(node)
 {
 }
 
@@ -34,24 +32,23 @@
 bool Input::ImageTopic::initialize()
 {
   slog::debug << "before Image Topic init" << slog::endl;
 
-  if(node_ == nullptr){
+  if (node_ == nullptr) {
     throw std::runtime_error("Image Topic is not instancialized because of no parent node.");
     return false;
   }
 
   auto qos = rclcpp::QoS(rclcpp::KeepLast(1)).best_effort();
-  sub_ = node_->create_subscription<sensor_msgs::msg::Image>(
-    INPUT_TOPIC, qos,
-    std::bind(&ImageTopic::cb, this, std::placeholders::_1));
+  sub_ = node_->create_subscription<sensor_msgs::msg::Image>(INPUT_TOPIC, qos,
+                                                             std::bind(&ImageTopic::cb, this, std::placeholders::_1));
 
   return true;
 }
 
- bool Input::ImageTopic::initialize(size_t width, size_t height)
- {
-  slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)"
-    << " for Image Topic" << slog::endl;
-  return initialize();
- }
+bool Input::ImageTopic::initialize(size_t width, size_t height)
+{
+  slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)"
+             << " for Image Topic" << slog::endl;
+  return initialize();
+}
 
 void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg)
 {
@@ -59,14 +56,10 @@
   setHeader(image_msg->header);
   image_ = cv_bridge::toCvCopy(image_msg, "bgr8")->image;
 
-  //Suppose Image Topic is sent within BGR order, so the below line would work.
-  //image_ = cv::Mat(image_msg->height, image_msg->width, CV_8UC3,
-  //  const_cast<uchar *>(&image_msg->data[0]), image_msg->step);
-
   image_count_.increaseCounter();
 }
 
-bool Input::ImageTopic::read(cv::Mat * frame)
+bool Input::ImageTopic::read(cv::Mat* frame)
 {
   if (image_count_.get() < 0 || image_.empty()) {
     slog::debug << "No data received in CameraTopic instance" << slog::endl;
diff --git a/dynamic_vino_lib/src/inputs/ip_camera.cpp b/openvino_wrapper_lib/src/inputs/ip_camera.cpp
similarity index 89%
rename from dynamic_vino_lib/src/inputs/ip_camera.cpp
rename to openvino_wrapper_lib/src/inputs/ip_camera.cpp
index f4975648..fc6c69d1 100644
--- a/dynamic_vino_lib/src/inputs/ip_camera.cpp
+++ b/openvino_wrapper_lib/src/inputs/ip_camera.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
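[Editor's note] ImageTopic::cb() above now relies solely on cv_bridge::toCvCopy(), which deep-copies the message into a BGR8 cv::Mat; the old zero-copy cv::Mat wrapper around the message buffer was dropped. Where the copy matters, cv_bridge also offers a shared variant; a hedged sketch, not used by this patch:

    cv_bridge::CvImageConstPtr shared = cv_bridge::toCvShare(image_msg, "bgr8");
    const cv::Mat& view = shared->image;  // valid only while `shared` (and the message) lives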
@@ -16,8 +16,7 @@
  * @brief a header file with declaration of IpCamera class
  * @file ip_camera.cpp
  */
-#include "dynamic_vino_lib/inputs/ip_camera.hpp"
-
+#include "openvino_wrapper_lib/inputs/ip_camera.hpp"
 
 bool Input::IpCamera::initialize()
 {
@@ -35,7 +34,7 @@
   return isInit();
 }
 
-bool Input::IpCamera::read(cv::Mat * frame)
+bool Input::IpCamera::read(cv::Mat* frame)
 {
   if (!isInit()) {
     return false;
diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/openvino_wrapper_lib/src/inputs/realsense_camera.cpp
similarity index 83%
rename from dynamic_vino_lib/src/inputs/realsense_camera.cpp
rename to openvino_wrapper_lib/src/inputs/realsense_camera.cpp
index 62847f23..3686e42c 100644
--- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp
+++ b/openvino_wrapper_lib/src/inputs/realsense_camera.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
  * @brief a header file with declaration of RealSenseCamera class
  * @file realsense_camera.cpp
  */
-#include "dynamic_vino_lib/inputs/realsense_camera.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/inputs/realsense_camera.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // RealSenseCamera
 bool Input::RealSenseCamera::initialize()
@@ -37,33 +37,32 @@
   slog::info << "RealSense Serial number : " << devSerialNumber << slog::endl;
 
   cfg_.enable_device(devSerialNumber);
-  cfg_.enable_stream(RS2_STREAM_COLOR, static_cast<int>(width), static_cast<int>(height),
-    RS2_FORMAT_BGR8, 30);
+  cfg_.enable_stream(RS2_STREAM_COLOR, static_cast<int>(width), static_cast<int>(height), RS2_FORMAT_BGR8, 30);
 
   setInitStatus(pipe_.start(cfg_));
   setWidth(width);
   setHeight(height);
 
-  //bypass RealSense's bug: several captured frames after HW is inited are with wrong data.
+  // bypass RealSense's bug: several captured frames after HW is inited are with wrong data.
   bypassFewFramesOnceInited();
 
   return isInit();
}
 
-bool Input::RealSenseCamera::read(cv::Mat * frame)
+bool Input::RealSenseCamera::read(cv::Mat* frame)
 {
   if (!isInit()) {
     return false;
   }
 
-  try {
+  try {
     rs2::frameset data = pipe_.wait_for_frames();  // Wait for next set of frames from the camera
     rs2::frame color_frame;
     color_frame = data.get_color_frame();
 
     cv::Mat(cv::Size(static_cast<int>(getWidth()), static_cast<int>(getHeight())), CV_8UC3,
-      const_cast<void *>(color_frame.get_data()), cv::Mat::AUTO_STEP)
-    .copyTo(*frame);
+            const_cast<void*>(color_frame.get_data()), cv::Mat::AUTO_STEP)
+        .copyTo(*frame);
   } catch (...) {
     return false;
   }
@@ -88,7 +87,7 @@ std::string Input::RealSenseCamera::getCameraSN()
 
 void Input::RealSenseCamera::bypassFewFramesOnceInited()
 {
-  if(!isInit() || !first_read_){
+  if (!isInit() || !first_read_) {
     return;
   }
diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/openvino_wrapper_lib/src/inputs/standard_camera.cpp
similarity index 55%
rename from dynamic_vino_lib/src/inputs/standard_camera.cpp
rename to openvino_wrapper_lib/src/inputs/standard_camera.cpp
index fd7e209b..17a498af 100644
--- a/dynamic_vino_lib/src/inputs/standard_camera.cpp
+++ b/openvino_wrapper_lib/src/inputs/standard_camera.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,7 +16,11 @@
  * @brief a header file with declaration of StandardCamera class
 * @file standard_camera.cpp
  */
-#include "dynamic_vino_lib/inputs/standard_camera.hpp"
+#include "openvino_wrapper_lib/inputs/standard_camera.hpp"
+
+Input::StandardCamera::StandardCamera(const std::string& camera) : device_path_(camera)
+{
+}
 
 bool Input::StandardCamera::initialize()
 {
@@ -25,17 +29,26 @@
 
 bool Input::StandardCamera::initialize(size_t width, size_t height)
 {
-  auto id = getCameraId();
-  setInitStatus(cap.open(id));
-  cap.set(cv::CAP_PROP_FRAME_WIDTH, width);
-  cap.set(cv::CAP_PROP_FRAME_HEIGHT, height);
-  setWidth(width);
-  setHeight(height);
+  bool init = false;
+  if (!device_path_.empty()) {
+    init = cap.open(device_path_);
+  }
+  if (init == false) {
+    auto id = getCameraId();
+    init = cap.open(id);
+  }
 
-  return isInit();
+  if (init) {
+    cap.set(cv::CAP_PROP_FRAME_WIDTH, width);
+    cap.set(cv::CAP_PROP_FRAME_HEIGHT, height);
+    setWidth(width);
+    setHeight(height);
+    setInitStatus(true);
+  }
+  return init;
 }
 
-bool Input::StandardCamera::read(cv::Mat * frame)
+bool Input::StandardCamera::read(cv::Mat* frame)
 {
   if (!isInit()) {
     return false;
@@ -48,24 +61,23 @@
 
 int Input::StandardCamera::getCameraId()
 {
   // In case this function is invoked more than once.
-  if (camera_id_ >= 0){
+  if (camera_id_ >= 0) {
     return camera_id_;
   }
 
   static int STANDARD_CAMERA_COUNT = -1;
-  int fd; // A file descriptor to the video device
+  int fd;  // A file descriptor to the video device
   struct v4l2_capability cap;
   char file[32];
-  //if it is a realsense camera then skip it until we meet a standard camera
-  do
-  {
-    STANDARD_CAMERA_COUNT ++;
-    sprintf(file,"/dev/video%d",STANDARD_CAMERA_COUNT);//format filename
-    fd = open(file,O_RDWR);
+  // if it is a realsense camera then skip it until we meet a standard camera
+  do {
+    STANDARD_CAMERA_COUNT++;
+    sprintf(file, "/dev/video%d", STANDARD_CAMERA_COUNT);  // format filename
+    fd = open(file, O_RDWR);
     ioctl(fd, VIDIOC_QUERYCAP, &cap);
     close(fd);
-    std::cout << "!!camera: "<< cap.card << std::endl;
-  }while(!strcmp((char*)cap.card,"Intel(R) RealSense(TM) Depth Ca"));
+    std::cout << "!!camera: " << cap.card << std::endl;
+  } while (!strcmp((char*)cap.card, "Intel(R) RealSense(TM) Depth Ca"));
 
   camera_id_ = STANDARD_CAMERA_COUNT;
   return STANDARD_CAMERA_COUNT;
diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/openvino_wrapper_lib/src/inputs/video_input.cpp
similarity index 87%
rename from dynamic_vino_lib/src/inputs/video_input.cpp
rename to openvino_wrapper_lib/src/inputs/video_input.cpp
index d3279025..775559a0 100644
--- a/dynamic_vino_lib/src/inputs/video_input.cpp
+++ b/openvino_wrapper_lib/src/inputs/video_input.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -19,10 +19,10 @@
 
 #include <string>
 
-#include "dynamic_vino_lib/inputs/video_input.hpp"
+#include "openvino_wrapper_lib/inputs/video_input.hpp"
 
 // Video
-Input::Video::Video(const std::string & video)
+Input::Video::Video(const std::string& video)
 {
   video_.assign(video);
 }
@@ -47,7 +47,7 @@
   return isInit();
 }
 
-bool Input::Video::read(cv::Mat * frame)
+bool Input::Video::read(cv::Mat* frame)
 {
   if (!isInit()) {
     return false;
diff --git a/openvino_wrapper_lib/src/models/age_gender_detection_model.cpp b/openvino_wrapper_lib/src/models/age_gender_detection_model.cpp
new file mode 100644
index 00000000..74f5c07f
--- /dev/null
+++ b/openvino_wrapper_lib/src/models/age_gender_detection_model.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
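+
+// [Editor's note] The updateLayerProperty() implementations added in this
+// patch share one OpenVINO 2.0 pattern: wrap the ov::Model in a
+// ov::preprocess::PrePostProcessor, declare tensor element types/layouts,
+// rebuild the model, then set the batch size. A minimal sketch of the
+// pattern, stated as an assumption of typical usage rather than this
+// project's API:
+//
+//   ov::preprocess::PrePostProcessor ppp(model);
+//   ppp.input().tensor().set_element_type(ov::element::f32).set_layout("NCHW");
+//   model = ppp.build();               // returns the rewritten ov::Model
+//   ov::set_batch(model, batch_size);  // apply the configured max batch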
+
+/**
+ * @brief a header file with declaration of AgeGenderDetectionModel class
+ * @file age_gender_detection_model.cpp
+ */
+#include <memory>
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+
+// Validated Age Gender Classification Network
+Models::AgeGenderDetectionModel::AgeGenderDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                                         int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
+bool Models::AgeGenderDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
+{
+  slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
+  // set input property
+  inputs_info_ = model->inputs();
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = model->input().get_any_name();
+  ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_);
+
+  ov::Shape input_tensor_shape = model->input().get_shape();
+  if (inputs_info_.size() != 1) {
+    slog::warn << "This model seems not Age-Gender-like, which should have only one input, but we got "
+               << std::to_string(inputs_info_.size()) << " inputs" << slog::endl;
+    return false;
+  }
+
+  addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_);
+  const ov::Layout tensor_layout{ "NCHW" };
+  input_info.tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
+
+  // set output property
+  outputs_info_ = model->outputs();
+  if (outputs_info_.size() != 2) {
+    slog::warn << "This model seems not Age-Gender-like, which should have and only have 2 outputs, but we got "
+               << std::to_string(outputs_info_.size()) << " outputs" << slog::endl;
+    return false;
+  }
+
+#if (0)  ///
+  // Check More Configuration:
+  if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") {
+    std::swap(age_output_ptr, gender_output_ptr);
+  }
+  if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") {
+    slog::err << "In Age Gender network, age layer (" << age_output_ptr->getCreatorLayer().lock()->name
+              << ") should be a Convolution, but was: " << age_output_ptr->getCreatorLayer().lock()->type
+              << slog::endl;
+    return false;
+  }
+  if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") {
+    slog::err << "In Age Gender network, gender layer (" << gender_output_ptr->getCreatorLayer().lock()->name
+              << ") should be a SoftMax, but was: " << gender_output_ptr->getCreatorLayer().lock()->type
+              << slog::endl;
+    return false;
+  }
+  slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name << slog::endl;
+  slog::info << "Gender layer: " << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl;
+#endif
+
+  auto age_output_info = outputs_info_[1];
+  ppp.output(age_output_info.get_any_name()).tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
+  auto gender_output_info = outputs_info_[0];
+  ppp.output(gender_output_info.get_any_name()).tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
+
+  model = ppp.build();
+  ov::set_batch(model, getMaxBatchSize());
+
+  addOutputInfo("age", age_output_info.get_any_name());
+  addOutputInfo("gender", gender_output_info.get_any_name());
+  printAttribute();
+  return true;
+}
+
+const std::string Models::AgeGenderDetectionModel::getModelCategory() const
+{
+  return "Age Gender Detection";
+}
diff --git a/dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp b/openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp
similarity index 59%
rename from dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp
rename to openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp
index c2924c62..16948322 100644
--- a/dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp
+++ b/openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Intel Corporation
+// Copyright (c) 2020-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -19,59 +19,57 @@
 
 #include <string>
 
-#include "dynamic_vino_lib/models/attributes/base_attribute.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // Validated Face Detection Network
-Models::SSDModelAttr::SSDModelAttr(
-  const std::string model_name)
-: ModelAttribute(model_name)
+Models::SSDModelAttr::SSDModelAttr(const std::string model_name) : ModelAttribute(model_name)
 {
 }
 
-bool Models::SSDModelAttr::updateLayerProperty(
-  const InferenceEngine::CNNNetwork & net_reader)
+bool Models::SSDModelAttr::updateLayerProperty(const std::shared_ptr<ov::Model>& model)
 {
   slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
-
-  InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo());
+  auto input_info_map = model->inputs();
   if (input_info_map.size() != 1) {
     slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got "
-      << std::to_string(input_info_map.size()) << "inputs" << slog::endl;
+               << std::to_string(input_info_map.size()) << " inputs" << slog::endl;
     return false;
   }
-
-  InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second;
-  input_info->setPrecision(InferenceEngine::Precision::U8);
-  addInputInfo("input", input_info_map.begin()->first);
-  const InferenceEngine::SizeVector input_dims = input_info->getTensorDesc().getDims();
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = model->input().get_any_name();
+  ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_);
+  input_info.tensor().set_element_type(ov::element::u8);
+  addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_);
+
+  ov::Shape input_dims = input_info_map[0].get_shape();
   setInputHeight(input_dims[2]);
   setInputWidth(input_dims[3]);
 
   slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl;
-  InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo());
-  if (output_info_map.size() != 1) {
-    slog::warn << "This model seems not SSDNet-like! We got "
-      << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one."
-      << slog::endl;
+  auto outputs_info = model->outputs();
+  if (outputs_info.size() != 1) {
+    slog::warn << "This model seems not SSDNet-like! We got " << std::to_string(outputs_info.size())
+               << " outputs, but SSDnet has only one." << slog::endl;
     return false;
   }
-  InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second;
-  addOutputInfo("output", output_info_map.begin()->first);
-  slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first
-    << slog::endl;
-  output_data_ptr->setPrecision(InferenceEngine::Precision::FP32);
-///TODO: double check this part: BEGIN
-#if(0) ///
+  ov::preprocess::OutputInfo& output_info = ppp.output();
+  addOutputInfo(ModelAttribute::DefaultOutputName, model->output().get_any_name());
+  slog::info << "Checking Object Detection output ... Name=" << model->output().get_any_name() << slog::endl;
+
+  output_info.tensor().set_element_type(ov::element::f32);
+
+/// TODO: double check this part: BEGIN
+#if (0)  ///
   const InferenceEngine::CNNLayerPtr output_layer =
-    net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str());
+      model->getNetwork().getLayerByName(output_info_map.begin()->first.c_str());
   // output layer should have attribute called num_classes
   slog::info << "Checking Object Detection num_classes" << slog::endl;
   if (output_layer->params.find("num_classes") == output_layer->params.end()) {
     slog::warn << "This model's output layer (" << output_info_map.begin()->first
-      << ") should have num_classes integer attribute" << slog::endl;
+               << ") should have num_classes integer attribute" << slog::endl;
     return false;
   }
   // class number should be equal to size of label vector
@@ -88,25 +86,26 @@
   }
 }
 #endif
-  ///TODO: double check this part: END
+  /// TODO: double check this part: END
 
   // last dimension of output layer should be 7
-  const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims();
+  auto outputsDataMap = model->outputs();
+  auto& data = outputsDataMap[0];
+  ov::Shape output_dims = data.get_shape();
   setMaxProposalCount(static_cast<int>(output_dims[2]));
   slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl;
 
   auto object_size = static_cast<int>(output_dims[3]);
   if (object_size != 7) {
     slog::warn << "This model is NOT SSDNet-like, whose output data for each detected object"
-      << "should have 7 dimensions, but was " << std::to_string(object_size)
-      << slog::endl;
+               << " should have 7 dimensions, but was " << std::to_string(object_size) << slog::endl;
     return false;
   }
   setObjectSize(object_size);
 
   if (output_dims.size() != 4) {
     slog::warn << "This model is not SSDNet-like, output dimensions should be 4, but was "
-      << std::to_string(output_dims.size()) << slog::endl;
+               << std::to_string(output_dims.size()) << slog::endl;
     return false;
   }
 
@@ -114,4 +113,3 @@
   slog::info << "This model is SSDNet-like, Layer Property updated!" << slog::endl;
   return true;
 }
-
diff --git a/openvino_wrapper_lib/src/models/base_model.cpp b/openvino_wrapper_lib/src/models/base_model.cpp
new file mode 100644
index 00000000..d5b9e04d
--- /dev/null
+++ b/openvino_wrapper_lib/src/models/base_model.cpp
@@ -0,0 +1,178 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief a header file with declaration of BaseModel class
+ * @file base_model.cpp
+ */
+
+#include <algorithm>
+#include <fstream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/base_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#include "openvino_wrapper_lib/utils/common.hpp"
+#include "openvino_wrapper_lib/engines/engine.hpp"
+#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp"
+
+// Validated Base Network
+Models::BaseModel::BaseModel(const std::string& label_loc, const std::string& model_loc, int max_batch_size)
+  : label_loc_(label_loc), model_loc_(model_loc), max_batch_size_(max_batch_size), ModelAttribute(model_loc)
+{
+  if (model_loc.empty()) {
+    throw std::logic_error("model file name is empty!");
+  }
+}
+
+Models::BaseModel::BaseModel(const Params::ParamManager::InferenceRawData& config)
+  : BaseModel(config.label, config.model, config.batch)
+{
+  config_ = config;
+}
+
+void Models::BaseModel::modelInit()
+{
+  slog::info << "Loading network files: " << model_loc_ << slog::endl;
+  slog::info << label_loc_ << slog::endl;
+
+  // Read network model
+  model_ = engine.read_model(model_loc_);
+
+  // Extract model name and load its weights
+  // remove extension
+  size_t last_index = model_loc_.find_last_of(".");
+  std::string raw_name = model_loc_.substr(0, last_index);
+
+  // Read labels (if any)
+  std::string label_file_name = label_loc_.substr(0, last_index);
+  loadLabelsFromFile(label_loc_);
+
+  // Set batch size to given max_batch_size_
+  slog::info << "Batch size is set to " << max_batch_size_ << slog::endl;
+  updateLayerProperty(model_);
+}
+
+Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                                   int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
+
+bool Models::BaseModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index,
+                                  const std::shared_ptr<Engines::Engine>& engine)
+{
+  if (engine == nullptr) {
+    slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl;
+    return false;
+  }
+
+  ov::InferRequest infer_request = engine->getRequest();
+  ov::Tensor input_tensor = infer_request.get_tensor(getInputName("input0"));
+  ov::Shape input_shape = input_tensor.get_shape();
+
+  OPENVINO_ASSERT(input_shape.size() == 4);
+  const auto layout = getLayoutFromShape(input_shape);
+  const size_t width = input_shape[ov::layout::width_idx(layout)];
+  const size_t height = input_shape[ov::layout::height_idx(layout)];
+  const size_t channels = input_shape[ov::layout::channels_idx(layout)];
+
+  slog::debug << "width is:" << width << slog::endl;
+  slog::debug << "height is:" << height << slog::endl;
+  slog::debug << "channels is:" << channels << slog::endl;
+  slog::debug << "origin channels is:" << orig_image.channels() << slog::endl;
+  slog::debug << "input shape is:" << input_shape << slog::endl;
+
+  unsigned char* data = input_tensor.data<unsigned char>();
+  cv::Size size = { (int)width, (int)height };
+  cv::Mat resized_image(size, CV_8UC3, data);
+
+  if (isKeepInputRatio()) {
+    slog::debug << "keep Input Shape Ratio is ENABLED!" << slog::endl;
+    cv::Mat extend_image = extendFrameToInputRatio(orig_image);
+    cv::resize(extend_image, resized_image, size);
+    frame_resize_ratio_width_ = static_cast<float>(extend_image.cols) / width;
+    frame_resize_ratio_height_ = static_cast<float>(extend_image.rows) / height;
+  } else {
+    cv::resize(orig_image, resized_image, size);
+    frame_resize_ratio_width_ = static_cast<float>(orig_image.cols) / width;
+    frame_resize_ratio_height_ = static_cast<float>(orig_image.rows) / height;
+  }
+
+  return true;
+}
+
+cv::Mat Models::BaseModel::extendFrameToInputRatio(const cv::Mat orig)
+{
+  auto orig_width = orig.cols;
+  auto orig_height = orig.rows;
+  const auto target_width = getInputWidth();
+  const auto target_height = getInputHeight();
+  const float orig_ratio = static_cast<float>(orig_width) / orig_height;
+  const float target_ratio = static_cast<float>(target_width) / target_height;
+
+  slog::debug << "extend Ratio: orig_ratio:" << orig_ratio << ", target_ratio:" << target_ratio
+              << ", orig_width:" << orig_width << ", orig_height:" << orig_height << slog::endl;
+  if (orig_ratio < target_ratio) {
+    orig_width = (int)(orig_height * target_ratio);
+  } else {
+    orig_height = (int)(orig_width / target_ratio);  // height follows from width over the target ratio
+  }
+
+  slog::debug << "extend Image to: " << orig_width << "x" << orig_height << slog::endl;
+  cv::Mat result = cv::Mat::zeros(orig_height, orig_width, CV_8UC3);
+  orig.copyTo(result(cv::Rect(0, 0, orig.cols, orig.rows)));
+
+  return result;
+}
+
+bool Models::BaseModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
+{
+  slog::info << "Checking INPUTS & OUTPUTS for Model " << getModelName() << slog::endl;
+
+  // check input shape
+  inputs_info_ = model->inputs();
+  slog::debug << "input size=" << inputs_info_.size() << slog::endl;
+  if (inputs_info_.size() != getCountOfInputs()) {
+    slog::warn << "This inference sample should have " << getCountOfInputs() << " inputs, but we got "
+               << std::to_string(inputs_info_.size()) << " inputs" << slog::endl;
+    throw std::logic_error("input_tensor_count doesn't align!");
+    return false;
+  }
+
+  for (int i = 0; i < getCountOfInputs(); i++) {
+    std::string name{ "input" };
+    addInputInfo(name + std::to_string(i), inputs_info_[i].get_any_name());
+  }
+
+  // check output shape
+  outputs_info_ = model->outputs();
+  slog::debug << "output size=" << outputs_info_.size() << slog::endl;
+  if (outputs_info_.size() != getCountOfOutputs()) {
+    slog::warn << "This inference sample should have " << getCountOfOutputs() << " outputs, but we got "
+               << outputs_info_.size() << " outputs" << slog::endl;
+    throw std::logic_error("output_tensor_count doesn't align!");
+    return false;
+  }
+
+  for (int i = 0; i < getCountOfOutputs(); i++) {
+    std::string name{ "output" };
+    addOutputInfo(name + std::to_string(i), outputs_info_[i].get_any_name());
+  }
+
+  return true;
+}
diff --git a/openvino_wrapper_lib/src/models/emotion_detection_model.cpp b/openvino_wrapper_lib/src/models/emotion_detection_model.cpp
new file mode 100644
index 00000000..00e89c0f
--- /dev/null
+++ b/openvino_wrapper_lib/src/models/emotion_detection_model.cpp
@@ -0,0 +1,72 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief a header file with declaration of EmotionDetectionModel class
+ * @file emotion_detection_model.cpp
+ */
+#include <string>
+#include <vector>
+#include "openvino_wrapper_lib/models/emotion_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+
+// Validated Emotions Detection Network
+Models::EmotionDetectionModel::EmotionDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                                     int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
+
+bool Models::EmotionDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
+{
+  slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
+  // set input property
+  inputs_info_ = model->inputs();
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = model->input().get_any_name();
+  ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_);
+
+  ov::Shape input_tensor_shape = model->input().get_shape();
+  if (inputs_info_.size() != 1) {
+    slog::warn << "This model seems not Emotion-detection-model-like, which should have only one input, but we got "
+               << std::to_string(inputs_info_.size()) << " inputs" << slog::endl;
+    return false;
+  }
+
+  addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_);
+  const ov::Layout tensor_layout{ "NHWC" };
+  input_info.tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
+
+  // set output property
+  outputs_info_ = model->outputs();
+  output_tensor_name_ = model->output().get_any_name();
+  ov::preprocess::OutputInfo& output_info = ppp.output(output_tensor_name_);
+  if (outputs_info_.size() != 1) {
+    slog::warn << "This model should have and only have 1 output, but we got "
+               << std::to_string(outputs_info_.size()) << " outputs" << slog::endl;
+    return false;
+  }
+
+  model = ppp.build();
+  ov::set_batch(model, getMaxBatchSize());
+  addOutputInfo(ModelAttribute::DefaultOutputName, output_tensor_name_);
+
+  printAttribute();
+  return true;
+}
+
+const std::string Models::EmotionDetectionModel::getModelCategory() const
+{
+  return "Emotions Detection";
+}
diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/openvino_wrapper_lib/src/models/face_detection_model.cpp
similarity index 68%
rename from dynamic_vino_lib/src/models/face_detection_model.cpp
rename to openvino_wrapper_lib/src/models/face_detection_model.cpp
index c673b6d7..397f5c6a 100644
--- a/dynamic_vino_lib/src/models/face_detection_model.cpp
+++ b/openvino_wrapper_lib/src/models/face_detection_model.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
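[Editor's note] EmotionDetectionModel above only validates tensor counts and wires up the default input/output names; consumers are expected to read the single softmax output and take the arg-max index into the label list. A hedged sketch of that consumption step (the names here are assumptions, not this patch's API):

    #include <algorithm>
    const float* scores = request.get_tensor(output_name).data<float>();
    auto best = std::max_element(scores, scores + labels.size()) - scores;  // arg-max class index
    const std::string& emotion = labels[best];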
@@ -19,13 +19,13 @@
 
 #include <string>
 
-#include "dynamic_vino_lib/models/face_detection_model.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/models/face_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // Validated Face Detection Network
-Models::FaceDetectionModel::FaceDetectionModel(
-  const std::string & label_loc, const std::string & model_loc, int max_batch_size)
-: ObjectDetectionModel(label_loc, model_loc, max_batch_size)
+Models::FaceDetectionModel::FaceDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                               int max_batch_size)
+  : ObjectDetectionModel(label_loc, model_loc, max_batch_size)
 {
 }
diff --git a/dynamic_vino_lib/src/models/face_reidentification_model.cpp b/openvino_wrapper_lib/src/models/face_reidentification_model.cpp
similarity index 62%
rename from dynamic_vino_lib/src/models/face_reidentification_model.cpp
rename to openvino_wrapper_lib/src/models/face_reidentification_model.cpp
index a5d4572c..d546f17b 100644
--- a/dynamic_vino_lib/src/models/face_reidentification_model.cpp
+++ b/openvino_wrapper_lib/src/models/face_reidentification_model.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,26 +17,25 @@
  * @file face_reidentification_model.cpp
 */
 #include <string>
-#include "dynamic_vino_lib/models/face_reidentification_model.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/models/face_reidentification_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // Validated Face Reidentification Network
-Models::FaceReidentificationModel::FaceReidentificationModel(
-  const std::string & label_loc, const std::string & model_loc, int max_batch_size)
-: BaseModel(label_loc, model_loc, max_batch_size) {}
+Models::FaceReidentificationModel::FaceReidentificationModel(const std::string& label_loc,
+                                                             const std::string& model_loc, int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
 
-void Models::FaceReidentificationModel::setLayerProperty(
-  InferenceEngine::CNNNetwork& net_reader)
+void Models::FaceReidentificationModel::setLayerProperty(InferenceEngine::CNNNetwork& model)
 {
   // set input property
-  InferenceEngine::InputsDataMap input_info_map(
-    net_reader.getInputsInfo());
+  InferenceEngine::InputsDataMap input_info_map(model.getInputsInfo());
   InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second;
   input_info->setPrecision(InferenceEngine::Precision::U8);
   input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW);
   // set output property
-  InferenceEngine::OutputsDataMap output_info_map(
-    net_reader.getOutputsInfo());
-  InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second;
+  InferenceEngine::OutputsDataMap output_info_map(model.getOutputsInfo());
+  InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second;
   output_data_ptr->setPrecision(InferenceEngine::Precision::FP32);
   output_data_ptr->setLayout(InferenceEngine::Layout::NCHW);
   // set input and output layer name
@@ -44,8 +43,9 @@
   output_ = output_info_map.begin()->first;
 }
 
-void Models::FaceReidentificationModel::checkLayerProperty(
-  const InferenceEngine::CNNNetwork & net_reader) {}
+void Models::FaceReidentificationModel::checkLayerProperty(const InferenceEngine::CNNNetwork& model)
+{
+}
 
 const std::string Models::FaceReidentificationModel::getModelCategory() const
 {
diff --git a/openvino_wrapper_lib/src/models/head_pose_detection_model.cpp b/openvino_wrapper_lib/src/models/head_pose_detection_model.cpp
new file mode 100644
index 00000000..8be7d6af
--- /dev/null
+++ b/openvino_wrapper_lib/src/models/head_pose_detection_model.cpp
@@ -0,0 +1,79 @@
+// Copyright (c) 2018-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief a header file with declaration of HeadPoseDetectionModel class
+ * @file head_pose_detection_model.cpp
+ */
+
+#include <string>
+#include <vector>
+
+#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+
+// Validated Head Pose Network
+Models::HeadPoseDetectionModel::HeadPoseDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                                       int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
+
+bool Models::HeadPoseDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
+{
+  slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
+  // set input property
+  auto input_info_map = model->inputs();
+  if (input_info_map.size() != 1) {
+    slog::warn << "This model should have only one input, but we got " << std::to_string(input_info_map.size())
+               << " inputs" << slog::endl;
+    return false;
+  }
+
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = model->input().get_any_name();
+  ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_);
+  const ov::Layout input_tensor_layout{ "NCHW" };
+  input_info.tensor().set_element_type(ov::element::u8).set_layout(input_tensor_layout);
+  addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_);
+
+  // set output property
+  auto output_info_map = model->outputs();
+  std::vector<std::string> outputs_name;
+  for (auto& output_item : output_info_map) {
+    std::string output_tensor_name_ = output_item.get_any_name();
+    const ov::Layout output_tensor_layout{ "NC" };
+    ppp.output(output_tensor_name_).tensor().set_element_type(ov::element::f32).set_layout(output_tensor_layout);
+    outputs_name.push_back(output_tensor_name_);
+  }
+
+  model = ppp.build();
+  ov::set_batch(model, getMaxBatchSize());
+
+  for (const std::string& outName : { output_angle_r_, output_angle_p_, output_angle_y_ }) {
+    if (find(outputs_name.begin(), outputs_name.end(), outName) == outputs_name.end()) {
+      throw std::logic_error("There is no " + outName + " output in Head Pose Estimation network");
+    } else {
+      addOutputInfo(outName, outName);
+    }
+  }
+
+  printAttribute();
+  return true;
+}
+
+const std::string Models::HeadPoseDetectionModel::getModelCategory() const
+{
+  return "Head Pose Network";
+}
diff --git a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp b/openvino_wrapper_lib/src/models/landmarks_detection_model.cpp
similarity index 61%
rename from dynamic_vino_lib/src/models/landmarks_detection_model.cpp
rename to openvino_wrapper_lib/src/models/landmarks_detection_model.cpp
index 42aa5319..34e05d7d 100644
--- a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp
+++ b/openvino_wrapper_lib/src/models/landmarks_detection_model.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,26 +17,25 @@
  * @file landmarks_detection_model.cpp
 */
 #include <string>
-#include "dynamic_vino_lib/models/landmarks_detection_model.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // Validated Landmarks Detection Network
-Models::LandmarksDetectionModel::LandmarksDetectionModel(
-  const std::string & label_loc, const std::string & model_loc, int max_batch_size)
-: BaseModel(label_loc, model_loc, max_batch_size) {}
+Models::LandmarksDetectionModel::LandmarksDetectionModel(const std::string& label_loc, const std::string& model_loc,
+                                                         int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
 
-void Models::LandmarksDetectionModel::setLayerProperty(
-  InferenceEngine::CNNNetwork& net_reader)
+void Models::LandmarksDetectionModel::setLayerProperty(InferenceEngine::CNNNetwork& model)
 {
   // set input property
-  InferenceEngine::InputsDataMap input_info_map(
-    net_reader.getInputsInfo());
+  InferenceEngine::InputsDataMap input_info_map(model.getInputsInfo());
   InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second;
   input_info->setPrecision(InferenceEngine::Precision::U8);
   input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW);
   // set output property
-  InferenceEngine::OutputsDataMap output_info_map(
-    net_reader.getOutputsInfo());
-  InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second;
+  InferenceEngine::OutputsDataMap output_info_map(model.getOutputsInfo());
+  InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second;
   output_data_ptr->setPrecision(InferenceEngine::Precision::FP32);
   output_data_ptr->setLayout(InferenceEngine::Layout::NCHW);
   // set input and output layer name
@@ -44,16 +43,13 @@
   output_ = output_info_map.begin()->first;
 }
 
-void Models::LandmarksDetectionModel::checkLayerProperty(
-  const InferenceEngine::CNNNetReader::Ptr & net_reader)
+void Models::LandmarksDetectionModel::checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr& model)
 {
-  InferenceEngine::InputsDataMap input_info_map(
-    net_reader->getNetwork().getInputsInfo());
+  InferenceEngine::InputsDataMap input_info_map(model->getNetwork().getInputsInfo());
   if (input_info_map.size() != 1) {
     throw std::logic_error("Landmarks Detection topology should have only one input");
   }
-  InferenceEngine::OutputsDataMap output_info_map(
-    net_reader->getNetwork().getOutputsInfo());
+  InferenceEngine::OutputsDataMap output_info_map(model->getNetwork().getOutputsInfo());
   if (output_info_map.size() != 1) {
     throw std::logic_error("Landmarks Detection Network expects networks having one output");
   }
diff --git a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp b/openvino_wrapper_lib/src/models/license_plate_detection_model.cpp
similarity index 53%
rename from dynamic_vino_lib/src/models/license_plate_detection_model.cpp
rename to openvino_wrapper_lib/src/models/license_plate_detection_model.cpp
index 171764f5..7ca145bb 100644
--- a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp
+++ b/openvino_wrapper_lib/src/models/license_plate_detection_model.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,40 +17,42 @@
  * @file license_plate_detection_model.cpp
 */
 #include <string>
-#include "dynamic_vino_lib/models/license_plate_detection_model.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/models/license_plate_detection_model.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
 
 // Validated Vehicle Attributes Detection Network
-Models::LicensePlateDetectionModel::LicensePlateDetectionModel(
-  const std::string & label_loc, const std::string & model_loc, int max_batch_size)
-: BaseModel(label_loc, model_loc, max_batch_size) {}
+Models::LicensePlateDetectionModel::LicensePlateDetectionModel(const std::string& label_loc,
+                                                               const std::string& model_loc, int max_batch_size)
+  : BaseModel(label_loc, model_loc, max_batch_size)
+{
+}
 
-bool Models::LicensePlateDetectionModel::updateLayerProperty(
-  InferenceEngine::CNNNetwork& net_reader)
+bool Models::LicensePlateDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
 {
   slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
-  InferenceEngine::InputsDataMap input_info_map(
-    net_reader.getInputsInfo());
+  auto input_info_map = model->inputs();
   if (input_info_map.size() != 2) {
     throw std::logic_error("Vehicle Attribs topology should have only two inputs");
   }
-  auto sequence_input = (++input_info_map.begin());
-  if (sequence_input->second->getTensorDesc().getDims()[0] != getMaxSequenceSize()) {
+
+  auto sequence_input = input_info_map[1];
+  if (sequence_input.get_shape()[0] != getMaxSequenceSize()) {
     throw std::logic_error("License plate detection max sequence size mismatch");
   }
-  InferenceEngine::OutputsDataMap output_info_map(
-    net_reader.getOutputsInfo());
+
+  auto output_info_map = model->outputs();
   if (output_info_map.size() != 1) {
     throw std::logic_error("Vehicle Attribs Network expects networks having one output");
   }
-  InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second;
-  input_info->setPrecision(InferenceEngine::Precision::U8);
-  input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW);
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = input_info_map[0].get_any_name();
+  const ov::Layout tensor_layout{ "NCHW" };
+  ppp.input(input_tensor_name_).tensor().set_element_type(ov::element::u8).set_layout(tensor_layout);
+  model = ppp.build();
 
-  // set input and output layer name
-  input_ = input_info_map.begin()->first;
-  seq_input_ = (++input_info_map.begin())->first;
-  output_ = output_info_map.begin()->first;
+  input_ = input_tensor_name_;
+  seq_input_ = sequence_input.get_any_name();
+  output_ = model->output().get_any_name();
 
   return true;
 }
diff --git a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp b/openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp
similarity index 53%
rename from dynamic_vino_lib/src/models/object_detection_ssd_model.cpp
rename to openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp
index d0996fdc..f5471c56 100644
--- a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp
+++ b/openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp
@@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,19 +19,18 @@ #include #include #include -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "dynamic_vino_lib/models/object_detection_ssd_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/models/object_detection_ssd_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" // Validated Object Detection Network -Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(label_loc, model_loc, max_batch_size) +Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel(const std::string& label_loc, const std::string& model_loc, + int max_batch_size) + : ObjectDetectionModel(label_loc, model_loc, max_batch_size) { slog::debug << "TESTING: in ObjectDetectionSSDModel" << slog::endl; - //addCandidatedAttr(std::make_shared()); } const std::string Models::ObjectDetectionSSDModel::getModelCategory() const @@ -39,10 +38,8 @@ const std::string Models::ObjectDetectionSSDModel::getModelCategory() const return "Object Detection SSD"; } -bool Models::ObjectDetectionSSDModel::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool Models::ObjectDetectionSSDModel::enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) { if (!this->matToBlob(frame, input_frame_loc, 1, 0, engine)) { return false; @@ -52,9 +49,8 @@ bool Models::ObjectDetectionSSDModel::enqueue( return true; } -bool Models::ObjectDetectionSSDModel::matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) +bool Models::ObjectDetectionSSDModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) { if (engine == nullptr) { slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl;
@@ -63,14 +59,15 @@ bool Models::ObjectDetectionSSDModel::matToBlob(
   std::string input_name = getInputName();
   slog::debug << "add input image to blob: " << input_name << slog::endl;
-  InferenceEngine::Blob::Ptr input_blob =
-    engine->getRequest()->GetBlob(input_name);
-  InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims();
-  const int width = blob_size[3];
-  const int height = blob_size[2];
-  const int channels = blob_size[1];
-  u_int8_t * blob_data = input_blob->buffer().as<u_int8_t *>();
+  ov::Tensor in_tensor = engine->getRequest().get_tensor(input_name);
+
+  ov::Shape in_shape = in_tensor.get_shape();
+  const int width = in_shape[3];
+  const int height = in_shape[2];
+  const int channels = in_shape[1];
+
+  u_int8_t* blob_data = (u_int8_t*)in_tensor.data();
 
   cv::Mat resized_image(orig_image);
   if (width != orig_image.size().width || height != orig_image.size().height) {
@@ -82,7 +79,7 @@ bool Models::ObjectDetectionSSDModel::matToBlob(
     for (int h = 0; h < height; h++) {
       for (int w = 0; w < width; w++) {
         blob_data[batchOffset + c * width * height + h * width + w] =
-          resized_image.at<cv::Vec3b>(h, w)[c] * scale_factor;
+            resized_image.at<cv::Vec3b>(h, w)[c] * scale_factor;
       }
     }
   }
@@ -91,11 +88,9 @@ bool Models::ObjectDetectionSSDModel::matToBlob(
   return true;
 }
 
-bool Models::ObjectDetectionSSDModel::fetchResults(
-  const std::shared_ptr<Engines::Engine> & engine,
-  std::vector<dynamic_vino_lib::ObjectDetectionResult> & results,
-  const float & confidence_thresh,
-  const bool & enable_roi_constraint)
+bool Models::ObjectDetectionSSDModel::fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                                                   std::vector<openvino_wrapper_lib::ObjectDetectionResult>& results,
+                                                   const float& confidence_thresh, const bool& enable_roi_constraint)
 {
   slog::debug << "fetching Infer Results from the given SSD model" << slog::endl;
   if (engine == nullptr) {
@@ -104,39 +99,39 @@ bool Models::ObjectDetectionSSDModel::fetchResults(
   }
   slog::debug << "Fetching Detection Results ..." << slog::endl;
-  InferenceEngine::InferRequest::Ptr request = engine->getRequest();
+  ov::InferRequest request = engine->getRequest();
   std::string output = getOutputName();
-  const float * detections = request->GetBlob(output)->buffer().as<float *>();
+  const float* detections = (float*)request.get_tensor(output).data();
   slog::debug << "Analyzing Detection results..." << slog::endl;
   auto max_proposal_count = getMaxProposalCount();
   auto object_size = getObjectSize();
-  slog::debug << "MaxProprosalCount=" << max_proposal_count
-    << ", ObjectSize=" << object_size << slog::endl;
+  slog::debug << "MaxProposalCount=" << max_proposal_count << ", ObjectSize=" << object_size << slog::endl;
 
   for (int i = 0; i < max_proposal_count; i++) {
     float image_id = detections[i * object_size + 0];
     if (image_id < 0) {
-      //slog::info << "Found objects: " << i << "|" << results.size() << slog::endl;
       break;
     }
     cv::Rect r;
     auto label_num = static_cast<int>(detections[i * object_size + 1]);
-    std::vector<std::string> & labels = getLabels();
+    std::vector<std::string>& labels = getLabels();
     auto frame_size = getFrameSize();
     r.x = static_cast<int>(detections[i * object_size + 3] * frame_size.width);
     r.y = static_cast<int>(detections[i * object_size + 4] * frame_size.height);
     r.width = static_cast<int>(detections[i * object_size + 5] * frame_size.width - r.x);
     r.height = static_cast<int>(detections[i * object_size + 6] * frame_size.height - r.y);
-    if (enable_roi_constraint) {r &= cv::Rect(0, 0, frame_size.width, frame_size.height);}
+    if (enable_roi_constraint) {
+      r &= cv::Rect(0, 0, frame_size.width, frame_size.height);
+    }
 
-    dynamic_vino_lib::ObjectDetectionResult result(r);
-    std::string label = label_num < labels.size() ? labels[label_num] :
-      std::string("label #") + std::to_string(label_num);
+    openvino_wrapper_lib::ObjectDetectionResult result(r);
+    std::string label =
+        label_num < labels.size() ? labels[label_num] : std::string("label #") + std::to_string(label_num);
     result.setLabel(label);
 
     float confidence = detections[i * object_size + 2];
-    if (confidence <= confidence_thresh /* || r.x == 0 */) {  // why r.x needs to be checked?
+    if (confidence <= confidence_thresh) {
      continue;
     }
     result.setConfidence(confidence);
@@ -147,49 +142,62 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty(
   }
   return true;
 }
 
-bool Models::ObjectDetectionSSDModel::updateLayerProperty(
-  InferenceEngine::CNNNetwork& net_reader)
+bool Models::ObjectDetectionSSDModel::updateLayerProperty(std::shared_ptr<ov::Model>& model)
 {
   slog::info << "Checking INPUTs for model " << getModelName() << slog::endl;
-  InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo());
+  auto input_info_map = model->inputs();
   if (input_info_map.size() != 1) {
     slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got "
-      << std::to_string(input_info_map.size()) << "inputs" << slog::endl;
+               << std::to_string(input_info_map.size()) << " inputs" << slog::endl;
     return false;
   }
-
-  InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second;
-  input_info->setPrecision(InferenceEngine::Precision::U8);
-  addInputInfo("input", input_info_map.begin()->first);
+  ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+  input_tensor_name_ = model->input().get_any_name();
+  ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_);
+
+  input_info.tensor().set_element_type(ov::element::u8);
+  addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_);
 
-  const InferenceEngine::SizeVector input_dims = input_info->getTensorDesc().getDims();
+  ov::Shape input_dims = input_info_map[0].get_shape();
+
+  ov::Layout tensor_layout = ov::Layout("NCHW");
+  ov::Layout expect_layout = ov::Layout("NHWC");
   setInputHeight(input_dims[2]);
   setInputWidth(input_dims[3]);
+  if (input_dims[1] == 3)
+    expect_layout = ov::Layout("NCHW");
+  else if (input_dims[3] == 3)
+    expect_layout = ov::Layout("NHWC");
+  else
+    slog::warn << "unexpected input shape " << input_dims << slog::endl;
+
+  input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout);
+  input_info.preprocess().convert_layout(expect_layout).resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
 
   slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl;
-  InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo());
-  if (output_info_map.size() != 1) {
-    slog::warn << "This model seems not SSDNet-like! We got "
-      << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one."
-      << slog::endl;
+  auto outputs_info = model->outputs();
+  if (outputs_info.size() != 1) {
+    slog::warn << "This model seems not SSDNet-like! We got " << std::to_string(outputs_info.size())
+               << " outputs, but SSDnet has only one." << slog::endl;
     return false;
   }
-  InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second;
-  addOutputInfo("output", output_info_map.begin()->first);
-  slog::info << "Checking Object Detection output ...
Name=" << output_info_map.begin()->first - << slog::endl; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - -///TODO: double check this part: BEGIN -#if(0) + ov::preprocess::OutputInfo& output_info = ppp.output(); + addOutputInfo(ModelAttribute::DefaultOutputName, model->output().get_any_name()); + slog::info << "Checking Object Detection output ... Name=" << model->output().get_any_name() << slog::endl; + + output_info.tensor().set_element_type(ov::element::f32); + model = ppp.build(); + +/// TODO: double check this part: BEGIN +#if (0) const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + model->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; if (output_layer->params.find("num_classes") == output_layer->params.end()) { slog::warn << "This model's output layer (" << output_info_map.begin()->first - << ") should have num_classes integer attribute" << slog::endl; + << ") should have num_classes integer attribute" << slog::endl; return false; } // class number should be equal to size of label vector @@ -206,25 +214,26 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty( } } #endif - ///TODO: double check this part: END + /// TODO: double check this part: END // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); + auto outputsDataMap = model->outputs(); + auto& data = outputsDataMap[0]; + ov::Shape output_dims = data.get_shape(); setMaxProposalCount(static_cast(output_dims[2])); slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; auto object_size = static_cast(output_dims[3]); if (object_size != 7) { slog::warn << "This model is NOT SSDNet-like, whose output data for each detected object" - << "should have 7 dimensions, but was " << std::to_string(object_size) - << slog::endl; + << "should have 7 dimensions, but was " << std::to_string(object_size) << slog::endl; return false; } setObjectSize(object_size); if (output_dims.size() != 4) { slog::warn << "This model is not SSDNet-like, output dimensions shoulld be 4, but was" - << std::to_string(output_dims.size()) << slog::endl; + << std::to_string(output_dims.size()) << slog::endl; return false; } diff --git a/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp b/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp new file mode 100644 index 00000000..ce5126ba --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp @@ -0,0 +1,191 @@ +// Copyright (c) 2022-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of ObjectDetectionYolov5Model class + * @file object_detection_yolov5_model.cpp + */ +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/utils/common.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp" + +using namespace cv; +using namespace dnn; + +// Validated Object Detection Network +Models::ObjectDetectionYolov5Model::ObjectDetectionYolov5Model(const std::string& label_loc, + const std::string& model_loc, int max_batch_size) + : ObjectDetectionModel(label_loc, model_loc, max_batch_size) +{ + setKeepInputShapeRatio(true); +} + +bool Models::ObjectDetectionYolov5Model::updateLayerProperty(std::shared_ptr& model) +{ + Models::BaseModel::updateLayerProperty(model); + + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + + // preprocess image inputs + ov::preprocess::InputInfo& input_info = ppp.input(getInputInfo("input0")); + ov::Layout tensor_layout = ov::Layout("NHWC"); + + if (model->input(0).get_partial_shape().is_dynamic()) { + auto expected_size = getExpectedFrameSize(); + slog::info << "Model's input has dynamic shape, set to expected size: " << expected_size << slog::endl; + input_info.tensor().set_shape({ 1, expected_size.height, expected_size.width, 3 }); + } + + input_info.tensor() + .set_element_type(ov::element::u8) + .set_layout(tensor_layout) + .set_color_format(ov::preprocess::ColorFormat::BGR); + + input_info.preprocess() + .convert_element_type(ov::element::f32) + .convert_color(ov::preprocess::ColorFormat::RGB) + .scale({ 255., 255., 255. }); + ppp.input().model().set_layout("NCHW"); + + ppp.output().tensor().set_element_type(ov::element::f32); + + model = ppp.build(); + + ov::Shape input_shape = model->input(getInputInfo("input0")).get_shape(); + slog::debug << "image_tensor shape is:" << input_shape.size() << slog::endl; + OPENVINO_ASSERT(input_shape.size() == 4); + setInputHeight(input_shape[1]); + setInputWidth(input_shape[2]); + + auto output_info_map = model->outputs(); + ov::Shape output_dims = output_info_map[0].get_shape(); + if (output_dims[1] < output_dims[2]) { + slog::info << "Object-Size bigger than Proposal-Count, Outputs need Transform!" << slog::endl; + setTranspose(true); + setMaxProposalCount(static_cast(output_dims[2])); + setObjectSize(static_cast(output_dims[1])); + } else { + setTranspose(false); + setMaxProposalCount(static_cast(output_dims[1])); + setObjectSize(static_cast(output_dims[2])); + } + printAttribute(); + slog::info << "This model is Yolo-like, Layer Property updated!" 
<< slog::endl;
+  return true;
+}
+
+const std::string Models::ObjectDetectionYolov5Model::getModelCategory() const
+{
+  return "Object Detection Yolo v5";
+}
+
+bool Models::ObjectDetectionYolov5Model::enqueue(const std::shared_ptr<Engines::Engine>& engine, const cv::Mat& frame,
+                                                 const cv::Rect& input_frame_loc)
+{
+  setFrameSize(frame.cols, frame.rows);
+
+  if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) {
+    return false;
+  }
+  return true;
+}
+
+bool Models::ObjectDetectionYolov5Model::fetchResults(const std::shared_ptr<Engines::Engine>& engine,
+                                                      std::vector<openvino_wrapper_lib::ObjectDetectionResult>& results,
+                                                      const float& confidence_thresh, const bool& enable_roi_constraint)
+{
+  const float NMS_THRESHOLD = 0.45;  // remove overlapping bounding boxes
+
+  ov::InferRequest request = engine->getRequest();
+  std::string output = getOutputName();
+  const ov::Tensor& output_tensor = request.get_output_tensor();
+  ov::Shape output_shape = output_tensor.get_shape();
+  auto* detections = output_tensor.data<float>();
+  int rows = output_shape.at(1);
+  int dimensions = output_shape.at(2);
+  Mat output_buffer(output_shape[1], output_shape[2], CV_32F, detections);
+  // Check if transpose is needed
+  if (output_shape.at(2) > output_shape.at(1) && output_shape.at(2) > 300) {  // 300 is an arbitrary threshold
+                                                                              // (larger than the number of classes)
+    transpose(output_buffer, output_buffer);  //[8400,84] for yolov8
+    detections = (float*)output_buffer.data;
+    rows = output_shape.at(2);
+    dimensions = output_shape.at(1);
+  }
+  // slog::debug << "AFTER calibration: rows->" << rows << ", dimensions->" << dimensions << slog::endl;
+
+  std::vector<cv::Rect> boxes;
+  std::vector<int> class_ids;
+  std::vector<float> confidences;
+  std::vector<std::string>& labels = getLabels();
+
+  for (int i = 0; i < rows; i++) {
+    float* detection = &detections[i * dimensions];
+    if (hasConfidenceOutput()) {
+      float confidence = detection[4];
+      if (confidence < confidence_thresh)
+        continue;
+    }
+
+    const int classes_scores_start_pos = hasConfidenceOutput() ? 5 : 4;
+    float* classes_scores = &detection[classes_scores_start_pos];
+    int col = static_cast<int>(dimensions - classes_scores_start_pos);
+
+    cv::Mat scores(1, col, CV_32FC1, classes_scores);
+    cv::Point class_id;
+    double max_class_score;
+    cv::minMaxLoc(scores, nullptr, &max_class_score, nullptr, &class_id);
+
+    if (max_class_score > confidence_thresh) {
+      confidences.emplace_back(max_class_score);
+      class_ids.emplace_back(class_id.x);
+
+      float x = detection[0];
+      float y = detection[1];
+      float w = detection[2];
+      float h = detection[3];
+      auto x_min = x - (w / 2);
+      auto y_min = y - (h / 2);
+
+      boxes.emplace_back(x_min, y_min, w, h);
+    }
+  }
+
+  std::vector<int> nms_result;
+  cv::dnn::NMSBoxes(boxes, confidences, confidence_thresh, NMS_THRESHOLD, nms_result);
+  for (int idx : nms_result) {
+    double rx = getFrameResizeRatioWidth();
+    double ry = getFrameResizeRatioHeight();
+    int vx = int(rx * boxes[idx].x);
+    int vy = int(ry * boxes[idx].y);
+    int vw = int(rx * boxes[idx].width);
+    int vh = int(ry * boxes[idx].height);
+    cv::Rect rec(vx, vy, vw, vh);
+    Result result(rec);
+    result.setConfidence(confidences[idx]);
+    std::string label = class_ids[idx] < labels.size() ?
labels[class_ids[idx]] : + std::string("label #") + std::to_string(class_ids[idx]); + result.setLabel(label); + results.push_back(result); + } + + return true; +} diff --git a/openvino_wrapper_lib/src/models/object_detection_yolov8_model.cpp b/openvino_wrapper_lib/src/models/object_detection_yolov8_model.cpp new file mode 100644 index 00000000..9460b502 --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_detection_yolov8_model.cpp @@ -0,0 +1,24 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "openvino_wrapper_lib/models/object_detection_yolov8_model.hpp" + +Models::ObjectDetectionYolov8Model::ObjectDetectionYolov8Model(const std::string& label_loc, + const std::string& model_loc, int max_batch_size) + : ObjectDetectionYolov5Model(label_loc, model_loc, max_batch_size) +{ + // setKeepInputShapeRatio(true); + setHasConfidenceOutput(false); + setExpectedFrameSize({ 640, 640 }); +} diff --git a/openvino_wrapper_lib/src/models/object_segmentation_instance_maskrcnn_model.cpp b/openvino_wrapper_lib/src/models/object_segmentation_instance_maskrcnn_model.cpp new file mode 100644 index 00000000..c7fd80c5 --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_segmentation_instance_maskrcnn_model.cpp @@ -0,0 +1,224 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a source code file with declaration of ObjectSegmentationInstanceMaskrcnnModel class + * It is a child of class ObjectSegmentationInstanceModel. 
+ */ +#include +#include +#include +#include "openvino_wrapper_lib/inferences/object_segmentation_instance.hpp" +#include "openvino_wrapper_lib/utils/common.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_instance_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" + +// Validated Object Segmentation Network +Models::ObjectSegmentationInstanceMaskrcnnModel::ObjectSegmentationInstanceMaskrcnnModel(const std::string& label_loc, + const std::string& model_loc, + int max_batch_size) + : ObjectSegmentationInstanceModel(label_loc, model_loc, max_batch_size) +{ + setHasConfidenceOutput(true); + setKeepInputShapeRatio(true); + setCountOfInputs(2); + setCountOfOutputs(2); + setExpectedFrameSize({ 640, 360 }); +} + +bool Models::ObjectSegmentationInstanceMaskrcnnModel::updateLayerProperty(std::shared_ptr& model) +{ + Models::BaseModel::updateLayerProperty(model); + + slog::debug << "in Models' PrePostProcessor:" << slog::endl; + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + slog::debug << "Model's input size=" << model->inputs().size() << slog::endl; + // 1. preprocess image inputs + for (int i = 0; i < getCountOfInputs(); i++) { + std::string name{ "input" }; + name += std::to_string(i); + slog::debug << "Preprocessing Input: [" << name << "->" << getInputInfo(name) << slog::endl; + auto& input_info = ppp.input(getInputInfo(name)); + ov::Layout tensor_layout = ov::Layout("NHWC"); + auto input_shape = model->input(getInputInfo(name)).get_partial_shape(); + + if (input_shape.size() == 4) { // first input contains images + slog::debug << "handling Input[image_tensor]..." << slog::endl; + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout); + // addInputInfo(ModelAttribute::DefaultInputName, name); + // retagInputByValue(getInputInfo(name), "image_tensor"); + + if (input_shape.is_dynamic()) { + auto expected_size = getExpectedFrameSize(); + slog::info << "Model's input has dynamic shape, fix it to " << expected_size << slog::endl; + input_info.tensor().set_shape({ 1, expected_size.height, expected_size.width, 3 }); + } + } else if (input_shape.size() == 2) { // second input contains image info + slog::debug << "handling Input[image_info]..." << slog::endl; + input_info.tensor().set_element_type(ov::element::f32); + // addInputInfo("input2", info_name_); + // retagInputByValue(getInputInfo(name), "image_info"); + } else { + throw std::logic_error("Unsupported input shape with size = " + std::to_string(input_shape.size())); + } + } + + // ppp.input(0).model().set_layout("NCHW"); + + model = ppp.build(); + ppp = ov::preprocess::PrePostProcessor(model); + + ov::Shape input0_shape = model->input(getInputName("input0")).get_shape(); + slog::debug << "image_tensor shape is:" << input0_shape.size() << slog::endl; + OPENVINO_ASSERT(input0_shape.size() == 4); + setInputHeight(input0_shape[1]); + setInputWidth(input0_shape[2]); + + // 2. 
Preprocess Outputs
+  auto check_output_and_rename = [&](const std::string& output) {
+    auto output_info = model->output(output);
+    auto shape = output_info.get_partial_shape();
+    slog::info << "Output shape for [" << output << "] is: " << shape << slog::endl;
+    if (shape.size() == 4) {
+      slog::info << "find output tensor - [masks]" << slog::endl;
+      retagOutputByValue(output, "masks");
+    } else if (shape.size() == 2) {
+      slog::info << "find output tensor - [detection]" << slog::endl;
+      retagOutputByValue(output, "detection");
+    } else {
+      throw std::logic_error("The shape of output tensors is wrong, must be 4 or 2!");
+    }
+  };
+  check_output_and_rename(getOutputName("output0"));
+  check_output_and_rename(getOutputName("output1"));
+
+  ov::preprocess::OutputInfo& output_info = ppp.output(getOutputName("masks"));
+  output_info.tensor().set_element_type(ov::element::f32);
+
+  model = ppp.build();
+
+  if (model->is_dynamic()) {
+    slog::warn << "Model is still dynamic !!!!" << slog::endl;
+  } else {
+    auto output_info_map = model->outputs();
+    ov::Shape output_dims = output_info_map[0].get_shape();
+    if (output_dims[1] < output_dims[2]) {
+      slog::info << "Object-Size bigger than Proposal-Count, Outputs need Transform!" << slog::endl;
+      setTranspose(true);
+      setMaxProposalCount(static_cast<int>(output_dims[2]));
+      setObjectSize(static_cast<int>(output_dims[1]));
+    } else {
+      setTranspose(false);
+      setMaxProposalCount(static_cast<int>(output_dims[1]));
+      setObjectSize(static_cast<int>(output_dims[2]));
+    }
+  }
+
+  printAttribute();
+  slog::info << "Layer Property updated!" << slog::endl;
+  return true;
+}
+
+bool Models::ObjectSegmentationInstanceMaskrcnnModel::fetchResults(
+    const std::shared_ptr<Engines::Engine>& engine,
+    std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>& results, const float& confidence_thresh,
+    const bool& enable_roi_constraint)
+{
+  ov::InferRequest infer_request = engine->getRequest();
+  slog::debug << "Analyzing Detection results..." << slog::endl;
+  std::string detection_output = getOutputName("detection");
+  std::string mask_output = getOutputName("masks");
+  slog::debug << "Detection_output=" << detection_output << ", Mask_output=" << mask_output << slog::endl;
+
+  // get detection data
+  ov::Tensor do_tensor = infer_request.get_tensor(detection_output);
+  const auto do_data = do_tensor.data<float>();
+  ov::Shape do_shape = do_tensor.get_shape();
+  slog::debug << "Detection Blob getDims = " << do_shape.size() << "[Should be 2]" << slog::endl;
+  // get mask data
+  ov::Tensor mask_tensor = infer_request.get_tensor(mask_output);
+  const auto mask_data = mask_tensor.data<float>();
+  ov::Shape mask_shape = mask_tensor.get_shape();
+
+  // determine tensor layouts
+  size_t box_description_size = do_shape.back();
+  OPENVINO_ASSERT(mask_shape.size() == 4);
+  size_t box_num = mask_shape[0];
+  size_t C = mask_shape[1];
+  size_t H = mask_shape[2];
+  size_t W = mask_shape[3];
+  size_t box_stride = W * H * C;
+  slog::debug << "box_description is:" << box_description_size << slog::endl;
+  slog::debug << "box_num is:" << box_num << slog::endl;
+  slog::debug << "C is:" << C << slog::endl;
+  slog::debug << "H is:" << H << slog::endl;
+  slog::debug << "W is:" << W << slog::endl;
+
+  for (size_t box = 0; box < box_num; ++box) {
+    // box description: batch, label, prob, x1, y1, x2, y2
+    float* box_info = do_data + box * box_description_size;
+    auto batch = static_cast<int>(box_info[0]);
+    slog::debug << "batch =" << batch << slog::endl;
+    if (batch < 0) {
+      slog::warn << "Batch size should be greater than 0. [batch=" << batch << "]." << slog::endl;
+      break;
+    }
+    float prob = box_info[2];
+    const double rx = getFrameResizeRatioWidth();
+    const double ry = getFrameResizeRatioHeight();
+    // slog::debug << "FrameResizeRatio W:" << rx << ", H:" << ry << slog::endl;
+    const auto iW = getInputWidth();
+    const auto iH = getInputHeight();
+    if (prob > confidence_thresh) {
+      float x1 = std::min(std::max(0.0f, box_info[3] * iW), static_cast<float>(iW)) * rx;
+      float y1 = std::min(std::max(0.0f, box_info[4] * iH), static_cast<float>(iH)) * ry;
+      float x2 = std::min(std::max(0.0f, box_info[5] * iW), static_cast<float>(iW)) * rx;
+      float y2 = std::min(std::max(0.0f, box_info[6] * iH), static_cast<float>(iH)) * ry;
+      auto fSize = getFrameSize();
+      if ((int)x2 >= fSize.width) {
+        x2 = fSize.width - 2;
+      }
+      if ((int)y2 >= fSize.height) {
+        y2 = fSize.height - 2;
+      }
+      int box_width = static_cast<int>(x2 - x1);
+      int box_height = static_cast<int>(y2 - y1);
+      slog::debug << "Box[" << box_width << "x" << box_height << "]" << slog::endl;
+      if (box_width <= 0 || box_height <= 0)
+        break;
+      int class_id = static_cast<int>(box_info[1] + 1e-6f);
+      float* mask_arr = mask_data + box_stride * box + H * W * (class_id - 1);
+      cv::Mat mask_mat(H, W, CV_32FC1, mask_arr);
+      cv::Rect roi = cv::Rect(static_cast<int>(x1), static_cast<int>(y1), box_width, box_height)
+          /*& cv::Rect({0, 0}, getFrameSize()-cv::Size{2, 2})*/;
+      slog::info << "Detected class " << class_id << " with probability " << prob << " from batch " << batch << ": "
+                 << roi << slog::endl;
+      cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1);
+      cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height));
+      Result result(roi);
+      result.setConfidence(prob);
+      std::vector<std::string>& labels = getLabels();
+      std::string label =
+          class_id < labels.size() ? labels[class_id] : std::string("label #") + std::to_string(class_id);
+      result.setLabel(label);
+      result.setMask(resized_mask_mat);
+      slog::debug << "adding one segmentation Box ..." << slog::endl;
+      results.emplace_back(result);
+    }
+  }
+
+  return true;
+}
diff --git a/openvino_wrapper_lib/src/models/object_segmentation_instance_model.cpp b/openvino_wrapper_lib/src/models/object_segmentation_instance_model.cpp
new file mode 100644
index 00000000..1362705e
--- /dev/null
+++ b/openvino_wrapper_lib/src/models/object_segmentation_instance_model.cpp
@@ -0,0 +1,317 @@
+// Copyright (c) 2023 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @brief a source file with declaration of ObjectSegmentationInstanceModel class
+ * By default yolov8 segmentation models are supported.
+ */ +#include +#include +#include +#include "openvino_wrapper_lib/inferences/object_segmentation_instance.hpp" +#include "openvino_wrapper_lib/utils/common.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_instance_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" + +// Validated Object Segmentation Network +Models::ObjectSegmentationInstanceModel::ObjectSegmentationInstanceModel(const std::string& label_loc, + const std::string& model_loc, + int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ + setDefaultConfig(); +} + +Models::ObjectSegmentationInstanceModel::ObjectSegmentationInstanceModel( + const Params::ParamManager::InferenceRawData& config) + : BaseModel(config) +{ + setDefaultConfig(); +} + +void Models::ObjectSegmentationInstanceModel::setDefaultConfig() +{ + setHasConfidenceOutput(false); + setKeepInputShapeRatio(true); + setCountOfInputs(1); + setCountOfOutputs(2); + setExpectedFrameSize({ 640, 640 }); +} + +bool Models::ObjectSegmentationInstanceModel::enqueue(const std::shared_ptr& engine, + const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + setFrameSize(frame.cols, frame.rows); + + for (const auto& inputInfoItem : inputs_info_) { + auto dims = inputInfoItem.get_partial_shape(); + slog::debug << "input tensor shape is:" << dims.size() << slog::endl; + + if (dims.size() == 4) { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + // Fill second input tensor with image info + if (dims.size() == 2) { + ov::Tensor in_tensor = engine->getRequest().get_tensor(inputInfoItem); + auto data = in_tensor.data(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } + } + + return true; +} + +const std::string Models::ObjectSegmentationInstanceModel::getModelCategory() const +{ + return "Object Segmentation - Yolo-Like"; +} + +bool Models::ObjectSegmentationInstanceModel::updateLayerProperty(std::shared_ptr& model) +{ + Models::BaseModel::updateLayerProperty(model); + + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + + // 1. preprocess image inputs + ov::preprocess::InputInfo& input_info = ppp.input(getInputInfo("input0")); + ov::Layout tensor_layout = ov::Layout("NHWC"); + + if (model->input(0).get_partial_shape().is_dynamic()) { + auto expected_size = getExpectedFrameSize(); + slog::info << "Model's input has dynamic shape, fix it to " << expected_size << slog::endl; + input_info.tensor().set_shape({ 1, expected_size.height, expected_size.width, 3 }); + } + + input_info.tensor() + .set_element_type(ov::element::u8) + .set_layout(tensor_layout) + .set_color_format(ov::preprocess::ColorFormat::BGR); + + input_info.preprocess() + .convert_element_type(ov::element::f32) + .convert_color(ov::preprocess::ColorFormat::RGB) + .scale({ 255., 255., 255. }); + ppp.input().model().set_layout("NCHW"); + + model = ppp.build(); + ppp = ov::preprocess::PrePostProcessor(model); + + ov::Shape input_shape = model->input(getInputInfo("input0")).get_shape(); + slog::debug << "image_tensor shape is:" << input_shape.size() << slog::endl; + OPENVINO_ASSERT(input_shape.size() == 4); + setInputHeight(input_shape[1]); + setInputWidth(input_shape[2]); + + // 2. 
Preprocess Outputs
+  auto check_output_and_rename = [&](const std::string& output) {
+    auto output_info = model->output(output);
+    auto shape = output_info.get_partial_shape();
+    slog::info << "Output shape for [" << output << "] is: " << shape << slog::endl;
+    if (shape.size() == 4) {
+      slog::info << "find output tensor - [masks]" << slog::endl;
+      retagOutputByValue(output, "masks");
+    } else if (shape.size() == 3) {
+      slog::info << "find output tensor - [detection]" << slog::endl;
+      retagOutputByValue(output, "detection");
+    } else {
+      throw std::logic_error("The shape of output tensors is wrong, must be 4 or 3!");
+    }
+  };
+  check_output_and_rename(getOutputName("output0"));
+  check_output_and_rename(getOutputName("output1"));
+
+  ov::preprocess::OutputInfo& output_info = ppp.output(getOutputName("masks"));
+  output_info.tensor().set_element_type(ov::element::f32);
+
+  model = ppp.build();
+
+  if (model->is_dynamic()) {
+    slog::warn << "Model is still dynamic !!!!" << slog::endl;
+  } else {
+    auto output_info_map = model->outputs();
+    ov::Shape output_dims = output_info_map[0].get_shape();
+    if (output_dims[1] < output_dims[2]) {
+      slog::info << "Object-Size bigger than Proposal-Count, Outputs need Transform!" << slog::endl;
+      setTranspose(true);
+      setMaxProposalCount(static_cast<int>(output_dims[2]));
+      setObjectSize(static_cast<int>(output_dims[1]));
+    } else {
+      setTranspose(false);
+      setMaxProposalCount(static_cast<int>(output_dims[1]));
+      setObjectSize(static_cast<int>(output_dims[2]));
+    }
+  }
+
+  printAttribute();
+  slog::info << "Layer Property updated!" << slog::endl;
+  return true;
+}
+
+bool Models::ObjectSegmentationInstanceModel::fetchResults(
+    const std::shared_ptr<Engines::Engine>& engine,
+    std::vector<openvino_wrapper_lib::ObjectSegmentationInstanceResult>& results, const float& confidence_thresh,
+    const bool& enable_roi_constraint)
+{
+  const float NMS_THRESHOLD = config_.nms_threshold;  // threshold for removing overlapping bounding boxes
+  slog::debug << "NMS_THRESHOLD=" << NMS_THRESHOLD << slog::endl;
+
+  ov::InferRequest request = engine->getRequest();
+  std::string det_output = getOutputName("detection");
+  const ov::Tensor det_output_tensor = request.get_tensor(det_output);
+  ov::Shape det_output_shape = det_output_tensor.get_shape();
+  auto* detections = det_output_tensor.data<float>();
+  int rows = det_output_shape.at(1);
+  int dimensions = det_output_shape.at(2);
+  cv::Mat output_buffer(det_output_shape[1], det_output_shape[2], CV_32F, detections);
+  // Check if transpose is needed
+  // Do NOT use needTranspose() here; it is not correctly set when updateLayerProperty() is called.
+  if (det_output_shape.at(2) > det_output_shape.at(1) &&
+      det_output_shape.at(2) > 300) {  // 300 is an arbitrary threshold (larger than the number of classes)
+    cv::transpose(output_buffer, output_buffer);  //[8400,84+32] for yolov8 seg
+    detections = (float*)output_buffer.data;
+    rows = det_output_shape.at(2);
+    dimensions = det_output_shape.at(1);
+  }
+  slog::debug << "AFTER calibration: rows->" << rows << ", dimensions->" << dimensions << slog::endl;
+
+  std::vector<cv::Rect> boxes;
+  std::vector<cv::Mat> mask_confs;
+  std::vector<int> class_ids;
+  std::vector<float> confidences;
+  std::vector<std::string>& labels = getLabels();
+
+  for (int i = 0; i < rows; i++) {
+    // float *detection = &detections[i * dimensions];
+    if (hasConfidenceOutput()) {
+      float confidence = output_buffer.at<float>(int(i), 4);
+      if (confidence < confidence_thresh)
+        continue;
+    }
+
+    const int classes_scores_start_pos = hasConfidenceOutput() ? 5 : 4;
+    cv::Mat classes_scores = output_buffer.row(i).colRange(classes_scores_start_pos, dimensions - 32);  // 4, 84
+
+    cv::Point class_id;
+    double max_class_score;
+    cv::minMaxLoc(classes_scores, nullptr, &max_class_score, nullptr, &class_id);
+
+    if (max_class_score > confidence_thresh) {
+      confidences.emplace_back(max_class_score);
+      class_ids.emplace_back(class_id.x);
+
+      float x = output_buffer.at<float>(i, 0);  // detection[0];
+      float y = output_buffer.at<float>(i, 1);  // detection[1];
+      float w = output_buffer.at<float>(i, 2);  // detection[2];
+      float h = output_buffer.at<float>(i, 3);  // detection[3];
+      auto x_min = x - (w / 2);
+      auto y_min = y - (h / 2);
+
+      boxes.emplace_back(x_min, y_min, w, h);
+      cv::Mat mask_conf = output_buffer.row(i).colRange(dimensions - 32, dimensions);  // 84, 116
+      mask_confs.emplace_back(mask_conf);
+    }
+  }
+
+  std::vector<int> nms_result;
+  cv::dnn::NMSBoxes(boxes, confidences, confidence_thresh, NMS_THRESHOLD, nms_result);
+
+  const ov::Tensor mask_output_tensor = request.get_tensor(getOutputName("masks"));
+  ov::Shape mask_output_shape = mask_output_tensor.get_shape();
+  // const ov::Layout mask_layout {"NCHW"};  // must be "NCHW"?
+  const auto MASK_CHANNEL = mask_output_shape[1];
+  const auto MASK_HEIGHT = mask_output_shape[2];  // mask_output_shape[ov::layout::height_idx(mask_layout)];
+  const auto MASK_WIDTH = mask_output_shape[3];   // mask_output_shape[ov::layout::width_idx(mask_layout)];
+  slog::debug << "mask_output_shape: " << mask_output_shape << ",MASK_HEIGHT:" << MASK_HEIGHT
+              << ", MASK_WIDTH:" << MASK_WIDTH << slog::endl;
+  // cv::Mat proto(32, 25600, CV_32F, mask_output_tensor.data());  //[32,25600]
+  cv::Mat proto(MASK_CHANNEL, MASK_HEIGHT * MASK_WIDTH, CV_32F, mask_output_tensor.data());  //[32,25600]
+
+  for (int idx : nms_result) {
+    double rx = getFrameResizeRatioWidth();
+    double ry = getFrameResizeRatioHeight();
+    slog::debug << "Detection-Ratio (Input Image to Input Tensor): " << rx << "x" << ry << slog::endl;
+
+    // Bounding-Box in Input Tensor Size
+    int vx = std::max(0, int(boxes[idx].x));
+    int vy = std::max(0, int(boxes[idx].y));
+    int vw = std::min(std::max(0, int(boxes[idx].width)), getInputWidth() - vx - 1);
+    int vh = std::min(std::max(0, int(boxes[idx].height)), getInputHeight() - vy - 1);
+
+    cv::Rect vrec(vx, vy, vw, vh);
+    slog::debug << "Detection Rectangle in Input Tensor Size: " << vrec << slog::endl;
+    const int det_bb_x = vx * rx;
+    const int det_bb_y = vy * ry;
+    const auto frame_size = getFrameSize();
+    int det_bb_w = vw * rx;
+    int det_bb_h = vh * ry;
+    if (det_bb_w + det_bb_x >= frame_size.width) {
+      det_bb_w = std::max(0, frame_size.width - det_bb_x - 2);
+    }
+    if (det_bb_h + det_bb_y >= frame_size.height) {
+      det_bb_h = std::max(0, frame_size.height - det_bb_y - 2);
+    }
+    cv::Rect det_bb(det_bb_x, det_bb_y, det_bb_w, det_bb_h);
+    slog::debug << "Detection Rectangle in Input Image Size: " << det_bb << slog::endl;
+    Result result(det_bb);
+    result.setConfidence(confidences[idx]);
+    std::string label = class_ids[idx] < labels.size() ? labels[class_ids[idx]] :
+                                                         std::string("label #") + std::to_string(class_ids[idx]);
+    result.setLabel(label);
+
+    // Mask data operation
+    auto sigmoid = [](float a) { return 1. / (1.
+ exp(-a)); }; + cv::Mat m = mask_confs[idx] * proto; + for (int col = 0; col < m.cols; col++) { + m.at(0, col) = sigmoid(m.at(0, col)); + } + cv::Mat reshaped_m = m.reshape(1, MASK_HEIGHT); // 1x25600-->160x160, mask_output_shape:NCHW + + double mask_rx = static_cast(MASK_WIDTH) / getInputWidth(); + double mask_ry = static_cast(MASK_HEIGHT) / getInputHeight(); + slog::debug << "Mask-Ratio (Mask Tensor to Input Tensor): " << mask_rx << "x" << mask_ry << slog::endl; + int mask_x = int(mask_rx * vx); + int mask_y = int(mask_ry * vy); + int mask_w = std::ceil(mask_rx * vw); // ensuring mask_w > 0 by std::ceil (rather than int()) + int mask_h = std::ceil(mask_ry * vh); // ensuring mask_h > 0 by std::ceil (rather than int()) + + if (mask_w + mask_x >= MASK_WIDTH) { + mask_w = MASK_WIDTH - mask_x - 1; + } + if (mask_h + mask_y >= MASK_HEIGHT) { + mask_h = MASK_HEIGHT - mask_y - 1; + } + if (mask_w <= 0 || mask_h <= 0) { + break; + } + cv::Rect roi{ mask_x, mask_y, mask_w, mask_h }; + slog::debug << "Mask ROI:" << roi << slog::endl; + cv::Mat roi_mask = reshaped_m(roi); + cv::Mat resized_mask; + cv::resize(roi_mask, resized_mask, cv::Size(det_bb_w, det_bb_h)); + result.setMask(resized_mask); + + results.push_back(result); + } + + return true; +} diff --git a/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp b/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp new file mode 100644 index 00000000..4790886d --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp @@ -0,0 +1,221 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of ObjectSegmentationModel class + * @file object_segmentation_model.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" + +// Validated Object Segmentation Network +Models::ObjectSegmentationMaskrcnnModel::ObjectSegmentationMaskrcnnModel(const std::string& label_loc, + const std::string& model_loc, + int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::ObjectSegmentationMaskrcnnModel::enqueue(const std::shared_ptr& engine, + const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + for (const auto& inputInfoItem : inputs_info_) { + // Fill first input tensor with images. 
First b channel, then g and r channels + auto dims = inputInfoItem.get_shape(); + slog::debug << "input tensor shape is:" << dims.size() << slog::endl; + + if (dims.size() == 4) { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + // Fill second input tensor with image info + if (dims.size() == 2) { + ov::Tensor in_tensor = engine->getRequest().get_tensor(inputInfoItem); + auto data = in_tensor.data(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } + } + + return true; +} + +bool Models::ObjectSegmentationMaskrcnnModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) +{ + (void)scale_factor; + (void)batch_index; + + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + ov::InferRequest infer_request = engine->getRequest(); + ov::Tensor input_tensor = infer_request.get_tensor(getInputName()); + ov::Shape input_shape = input_tensor.get_shape(); + + OPENVINO_ASSERT(input_shape.size() == 4); + // For frozen graph model: layout= "NHWC" + const size_t width = input_shape[2]; + const size_t height = input_shape[1]; + const size_t channels = input_shape[3]; + + slog::debug << "width is:" << width << slog::endl; + slog::debug << "height is:" << height << slog::endl; + slog::debug << "channels is:" << channels << slog::endl; + slog::debug << "origin channels is:" << orig_image.channels() << slog::endl; + slog::debug << "input shape is:" << input_shape << slog::endl; + + if (static_cast(orig_image.channels()) != channels) { + throw std::runtime_error("The number of channels for net input and image must match"); + } + +#if 1 + // input_tensor = ov::Tensor(ov::element::u8, {1, height, width, channels}, resized_image.data); + // engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + unsigned char* data = input_tensor.data(); + cv::Size size = { (int)width, (int)height }; + cv::Mat resized_image(size, CV_8UC3, data); + cv::resize(orig_image, resized_image, size); +#else + const auto input_data = input_tensor.data(); + cv::Mat resized_image(orig_image); + if (static_cast(width) != orig_image.size().width || static_cast(height) != orig_image.size().height) { + cv::resize(orig_image, resized_image, cv::Size(width, height)); + } + + int batchOffset = batch_index * width * height * channels; + if (channels == 1) { + for (size_t h = 0; h < height; h++) { + for (size_t w = 0; w < width; w++) { + input_data[batchOffset + h * width + w] = resized_image.at(h, w); + } + } + } else if (channels == 3) { + for (size_t c = 0; c < channels; c++) { + for (size_t h = 0; h < height; h++) { + for (size_t w = 0; w < width; w++) { + input_data[batchOffset + c * width * height + h * width + w] = resized_image.at(h, w)[c]; + } + } + } + } else { + throw std::runtime_error("Unsupported number of channels"); + } +#endif + + return true; +} + +const std::string Models::ObjectSegmentationMaskrcnnModel::getModelCategory() const +{ + return "Object Segmentation"; +} + +bool Models::ObjectSegmentationMaskrcnnModel::updateLayerProperty(std::shared_ptr& model) +{ + slog::info << "Checking INPUTS for Model" << getModelName() << slog::endl; + + // check input shape + inputs_info_ = model->inputs(); + slog::debug << "input size=" << inputs_info_.size() << slog::endl; + if (inputs_info_.size() != 2) { + slog::warn << "This inference sample should have have two inputs, but we got" << 
std::to_string(inputs_info_.size()) + << "inputs" << slog::endl; + return false; + } + + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = inputs_info_[0].get_any_name(); + auto info_name_ = inputs_info_[1].get_any_name(); + slog::debug << "input_tensor_name is:" << input_tensor_name_ << slog::endl; + slog::debug << "input_info_name is:" << info_name_ << slog::endl; + + // preprocess image inputs + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + ov::Layout tensor_layout = ov::Layout("NHWC"); + ov::Shape input_shape = model->input("image_tensor").get_shape(); + slog::debug << "image_tensor shape is:" << input_shape.size() << slog::endl; + + // preprocess image info inputs + ov::preprocess::InputInfo& image_info = ppp.input(info_name_); + ov::Layout info_layout = ov::Layout("NC"); + ov::Shape info_shape = model->input("image_info").get_shape(); + slog::debug << "image_info shape is:" << info_shape.size() << slog::endl; + + for (const auto& inputInfoItem : inputs_info_) { + if (input_shape.size() == 4) { // first input contains images + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout); + addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_); + } else if (info_shape.size() == 2) { // second input contains image info + image_info.tensor().set_element_type(ov::element::f32); + addInputInfo("input2", info_name_); + } else { + throw std::logic_error("Unsupported input shape with size = " + std::to_string(input_shape.size())); + } + } + + std::string inputName = getInputName(); + slog::debug << "input name is:" << inputName << slog::endl; + OPENVINO_ASSERT(input_shape.size() == 4); + size_t netBatchSize = input_shape[0]; + size_t netInputHeight = input_shape[1]; + size_t netInputWidth = input_shape[2]; + slog::debug << "netBatchSize=" << netBatchSize << ", netInputHeight=" << netInputHeight + << ", netInputWidth=" << netInputWidth << slog::endl; + + // check output shape + outputs_info_ = model->outputs(); + slog::debug << "output size=" << outputs_info_.size() << slog::endl; + if (outputs_info_.size() != 2) { + slog::warn << "This inference sample should have have 2 outputs, but we got" << std::to_string(outputs_info_.size()) + << "outputs" << slog::endl; + return false; + } + + // preprocess outshape + output_tensor_name_ = outputs_info_[0].get_any_name(); + auto detection_name_ = outputs_info_[1].get_any_name(); + slog::debug << "output_tensor_name is:" << output_tensor_name_ << slog::endl; + slog::debug << "detection_name_is:" << detection_name_ << slog::endl; + + ov::preprocess::OutputInfo& output_info = ppp.output(output_tensor_name_); + ov::Shape mask_shape = model->output("masks").get_shape(); + slog::debug << "masks shape is:" << mask_shape.size() << slog::endl; + ov::Shape detection_shape = model->output("reshape_do_2d").get_shape(); + slog::debug << "detection shape is:" << detection_shape.size() << slog::endl; + output_info.tensor().set_element_type(ov::element::f32); + + model = ppp.build(); + + addOutputInfo("masks", output_tensor_name_); + slog::debug << "Mask_Output is set to " << output_tensor_name_ << slog::endl; + addOutputInfo("detection", detection_name_); + slog::debug << "Detection_Output is set to " << detection_name_ << slog::endl; + + printAttribute(); + slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; + return true; +} diff --git a/openvino_wrapper_lib/src/models/object_segmentation_model.cpp b/openvino_wrapper_lib/src/models/object_segmentation_model.cpp new file mode 100644 index 00000000..02ba172d --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_segmentation_model.cpp @@ -0,0 +1,215 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of ObjectSegmentationModel class + * @file object_segmentation_model.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +// Validated Object Segmentation Network +Models::ObjectSegmentationModel::ObjectSegmentationModel(const std::string& label_loc, const std::string& model_loc, + int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::ObjectSegmentationModel::enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + for (const auto& inputInfoItem : inputs_info_) { + // Fill first input tensor with images. First b channel, then g and r channels + auto dims = inputInfoItem.get_shape(); + if (dims.size() == 4) { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + // Fill second input tensor with image info + if (dims.size() == 2) { + ov::Tensor in_tensor = engine->getRequest().get_tensor(inputInfoItem); + auto data = in_tensor.data(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } + } + + return true; +} + +bool Models::ObjectSegmentationModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) +{ + (void)scale_factor; + (void)batch_index; + + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; + return false; + } +#if 1 + const size_t width = getInputWidth(); + const size_t height = getInputHeight(); + const size_t channels = 3; + slog::debug << "width is:" << width << slog::endl; + slog::debug << "height is:" << height << slog::endl; + + if (orig_image.cols != width || orig_image.rows != height) { + cv::Size size = { (int)width, (int)height }; + cv::Mat resized_image(size, CV_8UC3); + cv::resize(orig_image, resized_image, size); + ov::Tensor input_tensor = ov::Tensor(ov::element::u8, { 1, height, width, channels }, resized_image.data); + engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + } else { + ov::Tensor input_tensor = ov::Tensor(ov::element::u8, { 1, height, width, channels }, orig_image.data); + engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + } +#else + ov::InferRequest infer_request = engine->getRequest(); + ov::Tensor input_tensor = infer_request.get_tensor(getInputName()); + ov::Shape input_shape = input_tensor.get_shape(); + + OPENVINO_ASSERT(input_shape.size() == 4); + // For frozen graph model: + const size_t width = input_shape[2]; + const size_t height = input_shape[1]; + const size_t channels = input_shape[3]; + + slog::debug << "width is:" << width << slog::endl; + slog::debug << "height is:" << height << slog::endl; + slog::debug << "channels is:" << channels << slog::endl; + slog::debug << "origin channels is:" << orig_image.channels() << slog::endl; + slog::debug << "input shape is:" << input_shape << slog::endl; + + if (static_cast(orig_image.channels()) != channels) { + throw std::runtime_error("The number of channels for net input and image must match"); + } + + unsigned char* data = input_tensor.data(); + cv::Size size = { (int)width, (int)height }; + cv::Mat resized_image(size, CV_8UC3, data); + cv::resize(orig_image, resized_image, size); +#endif + return true; +} + +const std::string Models::ObjectSegmentationModel::getModelCategory() const +{ + return "Object Segmentation"; +} + +bool Models::ObjectSegmentationModel::updateLayerProperty(std::shared_ptr& model) +{ + slog::info << "Checking INPUTS for Model" << getModelName() << slog::endl; + + inputs_info_ = model->inputs(); + slog::debug << "input size" << inputs_info_.size() << slog::endl; + if (inputs_info_.size() != 1) { + slog::warn << "This inference sample should have only one input, but we got" << std::to_string(inputs_info_.size()) + << "inputs" << slog::endl; + return false; + } + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + + ov::Layout tensor_layout = ov::Layout("NHWC"); + ov::Layout expect_layout = ov::Layout("NCHW"); + ov::Shape input_shape = model->input().get_shape(); + if (input_shape[1] == 3) { + expect_layout = ov::Layout("NCHW"); + setInputWidth(input_shape[3]); + setInputHeight(input_shape[2]); + } else if (input_shape[3] == 3) { + expect_layout = ov::Layout("NHWC"); + setInputWidth(input_shape[2]); + setInputHeight(input_shape[1]); + } else + slog::warn << "unexpect input shape " << input_shape << slog::endl; + + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout).set_spatial_dynamic_shape(); + input_info.preprocess().convert_layout(expect_layout).resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); + addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_); + + auto outputs_info = model->outputs(); + if (outputs_info.size() 
+ +bool Models::ObjectSegmentationModel::updateLayerProperty(std::shared_ptr<ov::Model>& model) +{ + slog::info << "Checking INPUTS for Model " << getModelName() << slog::endl; + + inputs_info_ = model->inputs(); + slog::debug << "input size: " << inputs_info_.size() << slog::endl; + if (inputs_info_.size() != 1) { + slog::warn << "This inference sample should have only one input, but we got " << std::to_string(inputs_info_.size()) + << " inputs" << slog::endl; + return false; + } + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + + ov::Layout tensor_layout = ov::Layout("NHWC"); + ov::Layout expect_layout = ov::Layout("NCHW"); + ov::Shape input_shape = model->input().get_shape(); + if (input_shape[1] == 3) { + expect_layout = ov::Layout("NCHW"); + setInputWidth(input_shape[3]); + setInputHeight(input_shape[2]); + } else if (input_shape[3] == 3) { + expect_layout = ov::Layout("NHWC"); + setInputWidth(input_shape[2]); + setInputHeight(input_shape[1]); + } else + slog::warn << "unexpected input shape " << input_shape << slog::endl; + + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout).set_spatial_dynamic_shape(); + input_info.preprocess().convert_layout(expect_layout).resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); + addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_); + + auto outputs_info = model->outputs(); + if (outputs_info.size() != 1) { + slog::warn << "This inference sample should have only one output, but we got " << std::to_string(outputs_info.size()) + << " outputs" << slog::endl; + return false; + } + + output_tensor_name_ = model->output().get_any_name(); + auto data = model->output(); + + ov::preprocess::OutputInfo& output_info = ppp.output(output_tensor_name_); + output_info.tensor().set_element_type(ov::element::f32); + model = ppp.build(); + std::vector<size_t>& in_size_vector = input_shape; + slog::debug << "dimensions: " << in_size_vector.size() << slog::endl; + if (in_size_vector.size() != 4) { + slog::warn << "3-channel 4-dimensional model's input is expected, but we got " + << std::to_string(in_size_vector.size()) << " dimensions." << slog::endl; + return false; + } + + auto& outSizeVector = data.get_shape(); + int outChannels, outHeight, outWidth; + slog::debug << "output size vector " << outSizeVector.size() << slog::endl; + ov::Layout outputLayout(""); + switch (outSizeVector.size()) { + case 3: + outputLayout = "CHW"; + outChannels = 1; + outHeight = static_cast<int>(outSizeVector[ov::layout::height_idx(outputLayout)]); + outWidth = static_cast<int>(outSizeVector[ov::layout::width_idx(outputLayout)]); + break; + case 4: + // outChannels = outSizeVector[1]; + // outHeight = outSizeVector[2]; + // outWidth = outSizeVector[3]; + outputLayout = "NCHW"; + outChannels = static_cast<int>(outSizeVector[ov::layout::channels_idx(outputLayout)]); + outHeight = static_cast<int>(outSizeVector[ov::layout::height_idx(outputLayout)]); + outWidth = static_cast<int>(outSizeVector[ov::layout::width_idx(outputLayout)]); + break; + default: + throw std::runtime_error( + "Unexpected output blob shape. Only 4D and 3D output blobs are " + "supported."); + } + if (outHeight == 0 || outWidth == 0) { + slog::err << "output_height or output_width is not set, please check the MaskOutput Info " + << "is set correctly." << slog::endl; + return false; + } + + slog::debug << "output WIDTH " << outWidth << slog::endl; + slog::debug << "output HEIGHT " << outHeight << slog::endl; + slog::debug << "output CHANNELS " << outChannels << slog::endl; + slog::debug << "output NAME " << output_tensor_name_ << slog::endl; + addOutputInfo("detection", output_tensor_name_); + + printAttribute(); + slog::info << "This model is SSDNet-like, Layer Property updated!" << slog::endl; + return true; +}
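The PrePostProcessor chain built in updateLayerProperty() is the heart of the OpenVINO 2.x migration in this patch: layout conversion and resizing are compiled into the model once instead of being done by hand per frame. A condensed sketch of the same chain, assuming a single-input, single-output model (the model path is illustrative):

#include <openvino/openvino.hpp>

// Sketch: fold u8 NHWC input handling and f32 output into the model itself.
std::shared_ptr<ov::Model> withPreprocessing(ov::Core& core)
{
  auto model = core.read_model("segmentation.xml");  // illustrative path
  ov::preprocess::PrePostProcessor ppp(model);
  ppp.input().tensor()
      .set_element_type(ov::element::u8)
      .set_layout("NHWC")
      .set_spatial_dynamic_shape();  // accept any H/W at runtime
  ppp.input().preprocess()
      .convert_layout("NCHW")
      .resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
  ppp.output().tensor().set_element_type(ov::element::f32);
  return ppp.build();
}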
diff --git a/openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp b/openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp new file mode 100644 index 00000000..4fcae419 --- /dev/null +++ b/openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp @@ -0,0 +1,62 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a source file with definition of PersonAttribsDetectionModel class + * @file person_attribs_detection_model.cpp + */ +#include +#include +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +// Validated Person Attributes Detection Network
Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel(const std::string& label_loc, + const std::string& model_loc, int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::PersonAttribsDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model) +{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + auto input_info_map = model->inputs(); + if (input_info_map.size() != 1) { + throw std::logic_error("Person Attribs topology should have only one input"); + } + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout tensor_layout{ "NCHW" }; + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout); + + slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; + auto output_info_map = model->outputs(); + if (output_info_map.size() != 3) { + throw std::logic_error("Person Attribs Network expects networks having 3 outputs"); + } + + model = ppp.build(); + addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_); + addOutputInfo("attributes_output_", output_info_map[0].get_any_name()); + addOutputInfo("top_output_", output_info_map[1].get_any_name()); + addOutputInfo("bottom_output_", output_info_map[2].get_any_name()); + + printAttribute(); + return true; +} + +const std::string Models::PersonAttribsDetectionModel::getModelCategory() const +{ + return "Person Attributes Detection"; +}
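Since the three outputs are registered by alias above, downstream code can fetch each tensor by name once inference completes. A hedged sketch (the attributes/top/bottom ordering mirrors the addOutputInfo() calls above; what each tensor actually encodes depends on the concrete model variant):

#include <openvino/openvino.hpp>

// Sketch: read the three person-attributes outputs after the request finishes.
void readPersonAttribs(ov::InferRequest& request, const std::shared_ptr<ov::Model>& model)
{
  ov::Tensor attributes = request.get_tensor(model->output(0).get_any_name());
  ov::Tensor top_point = request.get_tensor(model->output(1).get_any_name());
  ov::Tensor bottom_point = request.get_tensor(model->output(2).get_any_name());
  const float* probs = attributes.data<float>();  // per-attribute confidences
  // Interpretation of probs[i] is model-dependent.
}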
diff --git a/openvino_wrapper_lib/src/models/person_reidentification_model.cpp b/openvino_wrapper_lib/src/models/person_reidentification_model.cpp new file mode 100644 index 00000000..076b65db --- /dev/null +++ b/openvino_wrapper_lib/src/models/person_reidentification_model.cpp @@ -0,0 +1,51 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a source file with definition of PersonReidentificationModel class + * @file person_reidentification_model.cpp + */ +#include +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +// Validated Person Reidentification Network +Models::PersonReidentificationModel::PersonReidentificationModel(const std::string& label_loc, + const std::string& model_loc, int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::PersonReidentificationModel::updateLayerProperty(std::shared_ptr<ov::Model>& model) +{ + slog::info << "Checking Inputs for Model " << getModelName() << slog::endl; + auto input_info_map = model->inputs(); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_ = input_info_map[0].get_any_name(); + const ov::Layout input_tensor_layout{ "NCHW" }; + ppp.input(input_).tensor().set_element_type(ov::element::u8).set_layout(input_tensor_layout); + + // set output property + auto output_info_map = model->outputs(); + output_ = output_info_map[0].get_any_name(); + + model = ppp.build(); + ov::set_batch(model, getMaxBatchSize()); + + return true; +} + +const std::string Models::PersonReidentificationModel::getModelCategory() const +{ + return "Person Reidentification"; +}
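One detail worth flagging in the re-identification model above is ov::set_batch(), applied after ppp.build() so that several person crops can be scored in a single request. Roughly (path and batch size are illustrative):

#include <openvino/openvino.hpp>

int main()
{
  ov::Core core;
  auto model = core.read_model("person-reidentification.xml");  // illustrative
  // Works when the batch ('N') dimension is identifiable from the model layout.
  ov::set_batch(model, 8);
  auto compiled = core.compile_model(model, "CPU");
  return 0;
}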
diff --git a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp b/openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp similarity index 51% rename from dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp rename to openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp index 0637f3f6..39b48ab4 100644 --- a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,44 +17,40 @@ * @file vehicle_attribs_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Vehicle Attributes Detection Network -Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) {} +Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel(const std::string& label_loc, + const std::string& model_loc, int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} -bool Models::VehicleAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) +bool Models::VehicleAttribsDetectionModel::updateLayerProperty(std::shared_ptr<ov::Model>& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { throw std::logic_error("Vehicle Attribs topology should have only one input"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + + auto output_info_map = model->outputs(); if (output_info_map.size() != 2) { throw std::logic_error("Vehicle Attribs Network expects networks having two outputs"); } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout tensor_layout{ "NCHW" }; + input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout); + model = ppp.build(); + + addInputInfo(ModelAttribute::DefaultInputName, input_tensor_name_); + // set input and output layer name - input_ = input_info_map.begin()->first; - auto output_iter = output_info_map.begin(); - // color_output_ = (output_iter++)->second->name; - // type_output_ = (output_iter++)->second->name; - InferenceEngine::DataPtr color_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr type_output_ptr = (output_iter++)->second; - - addOutputInfo("color_output_", color_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; - addOutputInfo("type_output_", type_output_ptr->getName()); + addOutputInfo("color_output_", output_info_map[1].get_any_name()); + addOutputInfo("type_output_", output_info_map[0].get_any_name()); printAttribute(); return true; @@ -64,4 +60,3 @@ const std::string Models::VehicleAttribsDetectionModel::getModelCategory() const { return "Vehicle Attributes Detection"; } - diff --git a/dynamic_vino_lib/src/outputs/base_output.cpp b/openvino_wrapper_lib/src/outputs/base_output.cpp similarity index 72% rename from dynamic_vino_lib/src/outputs/base_output.cpp rename to openvino_wrapper_lib/src/outputs/base_output.cpp index 84081496..e085bf85 100644 --- a/dynamic_vino_lib/src/outputs/base_output.cpp +++ b/openvino_wrapper_lib/src/outputs/base_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed
under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" -void Outputs::BaseOutput::setPipeline(Pipeline * const pipeline) +void Outputs::BaseOutput::setPipeline(Pipeline* const pipeline) { pipeline_ = pipeline; } -Pipeline * Outputs::BaseOutput::getPipeline() const +Pipeline* Outputs::BaseOutput::getPipeline() const { return pipeline_; } diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/openvino_wrapper_lib/src/outputs/image_window_output.cpp similarity index 63% rename from dynamic_vino_lib/src/outputs/image_window_output.cpp rename to openvino_wrapper_lib/src/outputs/image_window_output.cpp index 1653b6f6..a16d38f9 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/openvino_wrapper_lib/src/outputs/image_window_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,16 +23,16 @@ #include #include -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/image_window_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" -Outputs::ImageWindowOutput::ImageWindowOutput(const std::string & output_name, int focal_length) -: BaseOutput(output_name), focal_length_(focal_length) +Outputs::ImageWindowOutput::ImageWindowOutput(const std::string& output_name, int focal_length) + : BaseOutput(output_name), focal_length_(focal_length) { cv::namedWindow(output_name_, cv::WINDOW_AUTOSIZE); } -void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) +void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) { // frame_ = frame; frame_ = frame.clone(); @@ -48,8 +48,7 @@ void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) } } -unsigned Outputs::ImageWindowOutput::findOutput( - const cv::Rect & result_rect) +unsigned Outputs::ImageWindowOutput::findOutput(const cv::Rect& result_rect) { for (unsigned i = 0; i < outputs_.size(); i++) { if (outputs_[i].rect == result_rect) { @@ -63,8 +62,7 @@ unsigned Outputs::ImageWindowOutput::findOutput( return outputs_.size() - 1; } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -74,20 +72,17 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; - outputs_[target_index].desc += - ("[" + results[i].getColor() + "," + results[i].getType() + "]"); + outputs_[target_index].desc += ("[" + results[i].getColor() + "," + results[i].getType() + "]"); } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void 
Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -97,8 +92,7 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -110,30 +104,27 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); if (results[i].getMaleProbability() < 0.5) { outputs_[target_index].scalar = cv::Scalar(0, 0, 255); - } - else{ + } else { outputs_[target_index].scalar = cv::Scalar(0, 255, 0); } - outputs_[target_index].pa_top.x = results[i].getTopLocation().x*result_rect.width + result_rect.x; - outputs_[target_index].pa_top.y = results[i].getTopLocation().y*result_rect.height + result_rect.y; - outputs_[target_index].pa_bottom.x = results[i].getBottomLocation().x*result_rect.width + result_rect.x; - outputs_[target_index].pa_bottom.y = results[i].getBottomLocation().y*result_rect.height + result_rect.y; + outputs_[target_index].pa_top.x = results[i].getTopLocation().x * result_rect.width + result_rect.x; + outputs_[target_index].pa_top.y = results[i].getTopLocation().y * result_rect.height + result_rect.y; + outputs_[target_index].pa_bottom.x = results[i].getBottomLocation().x * result_rect.width + result_rect.x; + outputs_[target_index].pa_bottom.y = results[i].getBottomLocation().y * result_rect.height + result_rect.y; outputs_[target_index].rect = result_rect; outputs_[target_index].desc += "[" + results[i].getAttributes() + "]"; } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -143,46 +134,88 @@ void Outputs::ImageWindowOutput::accept( } } +void Outputs::ImageWindowOutput::mergeMask(const std::vector& results) +{ + const float alpha = 0.7f; + // const float MASK_THRESHOLD = 0.5; + // only for the merged mask Mat obtained from Models::fetchResults() + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect location = results[i].getLocation(); + slog::debug << "Rect:" << location << slog::endl; + slog::debug << " Frame Size: " << frame_.size() << slog::endl; + cv::Mat mask = results[i].getMask(); + cv::resize(mask, mask, frame_.size()); + cv::addWeighted(mask, alpha, frame_, 1.0f - alpha, 0.0f, frame_); + } +} + +void Outputs::ImageWindowOutput::accept(const std::vector& results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + + auto fd_conf = results[i].getConfidence(); + if (fd_conf > 0) { + outputs_[target_index].rect = result_rect; + std::ostringstream ostream; + ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; + outputs_[target_index].desc += ostream.str(); + auto label = results[i].getLabel(); + outputs_[target_index].desc += "[" + label + "]"; + } + } + mergeMask(results); +} + +// TODO: Deprecated, will merge the impl into the func for instanceResult.
void Outputs::ImageWindowOutput::mergeMask( - const std::vector & results) + const std::vector& results) { - /* std::map class_color; for (unsigned i = 0; i < results.size(); i++) { std::string class_label = results[i].getLabel(); if (class_color.find(class_label) == class_color.end()) { class_color[class_label] = class_color.size(); } - auto & color = colors_[class_color[class_label]]; + auto& color = colors_[class_color[class_label] % colors_.size()]; const float alpha = 0.7f; const float MASK_THRESHOLD = 0.5; cv::Rect location = results[i].getLocation(); cv::Mat roi_img = frame_(location); cv::Mat mask = results[i].getMask(); - cv::Mat colored_mask(location.height, location.width, frame_.type()); - - for (int h = 0; h < mask.size().height; ++h) { - for (int w = 0; w < mask.size().width; ++w) { - for (int ch = 0; ch < colored_mask.channels(); ++ch) { - colored_mask.at(h, w)[ch] = mask.at(h, w) > MASK_THRESHOLD ? - 255 * color[ch] : - roi_img.at(h, w)[ch]; - } - } + cv::Mat colored_mask(location.height, location.width, frame_.type(), cv::Scalar(color[2], color[1], color[0])); + roi_img.copyTo(colored_mask, mask <= MASK_THRESHOLD); + cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); + } +} + +void Outputs::ImageWindowOutput::mergeMask( + const std::vector& results) +{ + std::map class_color; + for (unsigned i = 0; i < results.size(); i++) { + std::string class_label = results[i].getLabel(); + if (class_color.find(class_label) == class_color.end()) { + class_color[class_label] = class_color.size(); } + auto& color = colors_[class_color[class_label] % colors_.size()]; + const float alpha = 0.7f; + const float MASK_THRESHOLD = 0.5; + + cv::Rect location = results[i].getLocation(); + cv::Mat roi_img = frame_(location); + cv::Mat mask = results[i].getMask(); + cv::Mat colored_mask(location.height, location.width, frame_.type(), cv::Scalar(color[2], color[1], color[0])); + roi_img.copyTo(colored_mask, mask <= MASK_THRESHOLD); cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } - */ - const float alpha = 0.5f; - cv::Mat roi_img = frame_; - cv::Mat colored_mask = results[0].getMask(); - cv::resize(colored_mask,colored_mask,cv::Size(frame_.size().width,frame_.size().height)); - cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } +// TODO: Deprecated, will merge the impl into the func for instanceResult. 
void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -201,7 +234,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -213,11 +246,28 @@ void Outputs::ImageWindowOutput::accept( ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; outputs_[target_index].desc += ostream.str(); } + auto label = results[i].getLabel(); + outputs_[target_index].desc += "[" + label + "]"; } + mergeMask(results); } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + auto fd_conf = results[i].getConfidence(); + if (fd_conf >= 0) { + std::ostringstream ostream; + ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; + outputs_[target_index].desc += ostream.str(); + } + } +} + +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -234,8 +284,7 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -247,8 +296,7 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -265,9 +313,7 @@ void Outputs::ImageWindowOutput::accept( } } -cv::Point Outputs::ImageWindowOutput::calcAxis( - cv::Mat r, double cx, double cy, double cz, - cv::Point cp) +cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, double cz, cv::Point cp) { cv::Mat Axis(3, 1, CV_32F); Axis.at<float>(0) = cx; @@ -277,10 +323,8 @@ cv::Point Outputs::ImageWindowOutput::calcAxis( o.at<float>(2) = camera_matrix_.at<float>(0); Axis = r * Axis + o; cv::Point point; - point.x = static_cast<int>((Axis.at<float>(0) / Axis.at<float>(2) * camera_matrix_.at<float>(0)) + - cp.x); - point.y = static_cast<int>((Axis.at<float>(1) / Axis.at<float>(2) * camera_matrix_.at<float>(4)) + - cp.y); + point.x = static_cast<int>((Axis.at<float>(0) / Axis.at<float>(2) * camera_matrix_.at<float>(0)) + cp.x); + point.y = static_cast<int>((Axis.at<float>(1) / Axis.at<float>(2) * camera_matrix_.at<float>(4)) + cp.y); return point; } @@ -296,8 +340,7 @@ cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, double pitc return r; } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { for (unsigned i = 0; i < results.size(); i++) { auto result = results[i]; @@ -326,15 +369,13 @@ void Outputs::ImageWindowOutput::decorateFrame() int fps = getPipeline()->getFPS(); std::stringstream ss; ss << "FPS: " << fps; - cv::putText(frame_, ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, - cv::Scalar(255, 0, 0)); + cv::putText(frame_,
ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, cv::Scalar(255, 0, 0)); } for (auto o : outputs_) { auto new_y = std::max(15, o.rect.y - 15); - cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, - o.scalar); + cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, o.scalar); cv::rectangle(frame_, o.rect, o.scalar, 1); - if (o.pa_top != o.pa_bottom){ + if (o.pa_top != o.pa_bottom) { cv::circle(frame_, o.pa_top, 3, cv::Scalar(255, 0, 0), 2); cv::circle(frame_, o.pa_bottom, 3, cv::Scalar(0, 255, 0), 2); } @@ -357,7 +398,7 @@ void Outputs::ImageWindowOutput::decorateFrame() void Outputs::ImageWindowOutput::handleOutput() { - if(frame_.cols == 0 || frame_.rows == 0){ + if (frame_.cols == 0 || frame_.rows == 0) { return; } decorateFrame(); diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/openvino_wrapper_lib/src/outputs/ros_service_output.cpp similarity index 75% rename from dynamic_vino_lib/src/outputs/ros_service_output.cpp rename to openvino_wrapper_lib/src/outputs/ros_service_output.cpp index bb20c00a..9cb2e534 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/openvino_wrapper_lib/src/outputs/ros_service_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,11 +20,10 @@ #include #include #include -#include "dynamic_vino_lib/outputs/ros_service_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_service_output.hpp" #include "cv_bridge/cv_bridge.h" -void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(std::shared_ptr response) { if (detected_objects_topic_ != nullptr && detected_objects_topic_->objects_vector.size() > 0) { response->objects.objects_vector = detected_objects_topic_->objects_vector; @@ -33,40 +32,35 @@ void Outputs::RosServiceOutput::setServiceResponse( } } -void Outputs::RosServiceOutput::setResponseForFace( - std::shared_ptr response) +void Outputs::RosServiceOutput::setResponseForFace(std::shared_ptr response) { if (faces_topic_ != nullptr && faces_topic_->objects_vector.size() > 0) { response->objects.objects_vector = faces_topic_->objects_vector; } } -void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(std::shared_ptr response) { if (age_gender_topic_ != nullptr) { response->age_gender.objects = age_gender_topic_->objects; } } -void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(std::shared_ptr response) { if (emotions_topic_ != nullptr) { response->emotion.emotions = emotions_topic_->emotions; } } -void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(std::shared_ptr response) { if (headpose_topic_ != nullptr) { response->headpose.headposes = headpose_topic_->headposes; } } -void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(std::shared_ptr response) { slog::info << "in People::Response ..."; if (faces_topic_ != nullptr) { diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/openvino_wrapper_lib/src/outputs/ros_topic_output.cpp 
similarity index 57% rename from dynamic_vino_lib/src/outputs/ros_topic_output.cpp rename to openvino_wrapper_lib/src/outputs/ros_topic_output.cpp index 1d24fbac..196bdaf5 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/openvino_wrapper_lib/src/outputs/ros_topic_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,51 +20,43 @@ #include #include #include -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/pipeline_params.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" #include "cv_bridge/cv_bridge.h" -Outputs::RosTopicOutput::RosTopicOutput(std::string output_name, - const rclcpp::Node::SharedPtr node) -: BaseOutput(output_name) +Outputs::RosTopicOutput::RosTopicOutput(std::string output_name, const rclcpp::Node::SharedPtr node) + : BaseOutput(output_name) { - // rmw_qos_profile_t qos = rmw_qos_profile_default; - // qos.depth = 10; - // qos.reliability = RMW_QOS_POLICY_RELIABILITY_RELIABLE; - // qos.history = RMW_QOS_POLICY_HISTORY_KEEP_ALL; - if(node != nullptr){ + if (node != nullptr) { node_ = node; } else { node_ = rclcpp::Node::make_shared(output_name + "_topic_publisher"); } - pub_license_plate_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/detected_license_plates", 16); - pub_vehicle_attribs_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/detected_vehicles_attribs", 16); - pub_landmarks_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/detected_landmarks", 16); - pub_face_reid_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/reidentified_faces", 16); - pub_person_attribs_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/person_attributes", 16); - pub_person_reid_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/reidentified_persons", 16); - pub_segmented_object_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/segmented_obejcts", 16); + pub_license_plate_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/detected_license_plates", 16); + pub_vehicle_attribs_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/detected_vehicles_attribs", 16); + pub_landmarks_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/detected_landmarks", 16); + pub_face_reid_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/reidentified_faces", 16); + pub_person_attribs_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/person_attributes", 16); + pub_person_reid_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/reidentified_persons", 16); + pub_segmented_object_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/segmented_obejcts", 16); pub_detected_object_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/detected_objects", 16); + "/openvino_toolkit/" + output_name_ + "/detected_objects", 16); pub_face_ = - node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/faces", 16); + node_->create_publisher("/openvino_toolkit/" + output_name_ + "/faces", 16); pub_emotion_ = - node_->create_publisher( - 
"/openvino_toolkit/" + output_name_ + "/emotions", 16); - pub_age_gender_ = node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/age_genders", 16); - pub_headpose_ = - node_->create_publisher( - "/openvino_toolkit/" + output_name_ + "/headposes", 16); + node_->create_publisher("/openvino_toolkit/" + output_name_ + "/emotions", 16); + pub_age_gender_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/age_genders", 16); + pub_headpose_ = node_->create_publisher( + "/openvino_toolkit/" + output_name_ + "/headposes", 16); emotions_topic_ = nullptr; detected_objects_topic_ = nullptr; faces_topic_ = nullptr; @@ -79,18 +71,16 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string output_name, license_plate_topic_ = nullptr; } -void Outputs::RosTopicOutput::feedFrame(const cv::Mat & frame) +void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) { frame_ = frame.clone(); } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - vehicle_attribs_topic_ = std::make_shared(); - people_msgs::msg::VehicleAttribs attribs; - for (auto & r : results) { - // slog::info << ">"; + vehicle_attribs_topic_ = std::make_shared(); + object_msgs::msg::VehicleAttribs attribs; + for (auto& r : results) { auto loc = r.getLocation(); attribs.roi.x_offset = loc.x; attribs.roi.y_offset = loc.y; @@ -102,13 +92,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - license_plate_topic_ = std::make_shared(); - people_msgs::msg::LicensePlate plate; - for (auto & r : results) { - // slog::info << ">"; + license_plate_topic_ = std::make_shared(); + object_msgs::msg::LicensePlate plate; + for (auto& r : results) { auto loc = r.getLocation(); plate.roi.x_offset = loc.x; plate.roi.y_offset = loc.y; @@ -119,13 +107,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - face_reid_topic_ = std::make_shared(); - people_msgs::msg::Reidentification face; - for (auto & r : results) { - // slog::info << ">"; + face_reid_topic_ = std::make_shared(); + object_msgs::msg::Reidentification face; + for (auto& r : results) { auto loc = r.getLocation(); face.roi.x_offset = loc.x; face.roi.y_offset = loc.y; @@ -136,13 +122,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - landmarks_topic_ = std::make_shared(); - people_msgs::msg::Landmark landmark; - for (auto & r : results) { - // slog::info << ">"; + landmarks_topic_ = std::make_shared(); + object_msgs::msg::Landmark landmark; + for (auto& r : results) { auto loc = r.getLocation(); landmark.roi.x_offset = loc.x; landmark.roi.y_offset = loc.y; @@ -159,13 +143,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - person_attribs_topic_ = std::make_shared(); - people_msgs::msg::PersonAttribute person_attrib; - for (auto & r : results) { - // slog::info << ">"; + person_attribs_topic_ = std::make_shared(); + object_msgs::msg::PersonAttribute person_attrib; + for (auto& r : results) { auto loc = r.getLocation(); person_attrib.roi.x_offset = loc.x; 
person_attrib.roi.y_offset = loc.y; @@ -176,13 +158,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - person_reid_topic_ = std::make_shared(); - people_msgs::msg::Reidentification person; - for (auto & r : results) { - // slog::info << ">"; + person_reid_topic_ = std::make_shared(); + object_msgs::msg::Reidentification person; + for (auto& r : results) { auto loc = r.getLocation(); person.roi.x_offset = loc.x; person.roi.y_offset = loc.y; @@ -193,13 +173,11 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - segmented_objects_topic_ = std::make_shared(); - people_msgs::msg::ObjectInMask object; - for (auto & r : results) { - // slog::info << ">"; + segmented_objects_topic_ = std::make_shared(); + object_msgs::msg::ObjectInMask object; + for (auto& r : results) { auto loc = r.getLocation(); object.roi.x_offset = loc.x; object.roi.y_offset = loc.y; @@ -217,13 +195,55 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) +{ + segmented_objects_topic_ = std::make_shared(); + object_msgs::msg::ObjectInMask object; + for (auto& r : results) { + auto loc = r.getLocation(); + object.roi.x_offset = loc.x; + object.roi.y_offset = loc.y; + object.roi.width = loc.width; + object.roi.height = loc.height; + object.object_name = r.getLabel(); + object.probability = r.getConfidence(); + cv::Mat mask = r.getMask(); + for (int h = 0; h < mask.size().height; ++h) { + for (int w = 0; w < mask.size().width; ++w) { + object.mask_array.push_back(mask.at(h, w)); + } + } + segmented_objects_topic_->objects_vector.push_back(object); + } +} + +void Outputs::RosTopicOutput::accept(const std::vector& results) +{ + segmented_objects_topic_ = std::make_shared(); + object_msgs::msg::ObjectInMask object; + for (auto& r : results) { + auto loc = r.getLocation(); + object.roi.x_offset = loc.x; + object.roi.y_offset = loc.y; + object.roi.width = loc.width; + object.roi.height = loc.height; + object.object_name = r.getLabel(); + object.probability = r.getConfidence(); + cv::Mat mask = r.getMask(); + for (int h = 0; h < mask.size().height; ++h) { + for (int w = 0; w < mask.size().width; ++w) { + object.mask_array.push_back(mask.at(h, w)); + } + } + segmented_objects_topic_->objects_vector.push_back(object); + } +} + +void Outputs::RosTopicOutput::accept(const std::vector& results) { detected_objects_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox object; - for (auto & r : results) { - // slog::info << ">"; + for (auto& r : results) { auto loc = r.getLocation(); object.roi.x_offset = loc.x; object.roi.y_offset = loc.y; @@ -235,14 +255,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { faces_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox face; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); face.roi.x_offset = loc.x; face.roi.y_offset = loc.y; @@ -255,13 +273,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - 
emotions_topic_ = std::make_shared(); + emotions_topic_ = std::make_shared(); - people_msgs::msg::Emotion emotion; + object_msgs::msg::Emotion emotion; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); emotion.roi.x_offset = loc.x; emotion.roi.y_offset = loc.y; @@ -272,13 +289,12 @@ void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - age_gender_topic_ = std::make_shared(); + age_gender_topic_ = std::make_shared(); - people_msgs::msg::AgeGender ag; + object_msgs::msg::AgeGender ag; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); ag.roi.x_offset = loc.x; ag.roi.y_offset = loc.y; @@ -297,11 +313,11 @@ void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { - headpose_topic_ = std::make_shared(); + headpose_topic_ = std::make_shared(); - people_msgs::msg::HeadPose hp; + object_msgs::msg::HeadPose hp; for (auto r : results) { auto loc = r.getLocation(); hp.roi.x_offset = loc.x; @@ -317,69 +333,58 @@ void Outputs::RosTopicOutput::accept(const std::vectorgetInputDevice()->getLockedHeader(); + auto header = getPipeline()->getInputDevice()->getHeader(); if (vehicle_attribs_topic_ != nullptr) { - // slog::info << "publishing landmarks detection outputs." << slog::endl; vehicle_attribs_topic_->header = header; pub_vehicle_attribs_->publish(*vehicle_attribs_topic_); vehicle_attribs_topic_ = nullptr; } if (license_plate_topic_ != nullptr) { - // slog::info << "publishing face reidentification outputs." << slog::endl; license_plate_topic_->header = header; pub_license_plate_->publish(*license_plate_topic_); license_plate_topic_ = nullptr; } if (landmarks_topic_ != nullptr) { - // slog::info << "publishing landmarks detection outputs." << slog::endl; landmarks_topic_->header = header; pub_landmarks_->publish(*landmarks_topic_); landmarks_topic_ = nullptr; } if (face_reid_topic_ != nullptr) { - // slog::info << "publishing face reidentification outputs." << slog::endl; face_reid_topic_->header = header; pub_face_reid_->publish(*face_reid_topic_); face_reid_topic_ = nullptr; } if (person_attribs_topic_ != nullptr) { - // slog::info << "publishing person attributes outputs." << slog::endl; person_attribs_topic_->header = header; pub_person_attribs_->publish(*person_attribs_topic_); person_attribs_topic_ = nullptr; } if (person_reid_topic_ != nullptr) { - // slog::info << "publishing preson reidentification outputs." << slog::endl; person_reid_topic_->header = header; pub_person_reid_->publish(*person_reid_topic_); person_reid_topic_ = nullptr; } if (segmented_objects_topic_ != nullptr) { - // slog::info << "publishing segmented objects outputs." << slog::endl; segmented_objects_topic_->header = header; pub_segmented_object_->publish(*segmented_objects_topic_); segmented_objects_topic_ = nullptr; } if (detected_objects_topic_ != nullptr) { - // slog::info << "publishing detected objects outputs." << slog::endl; detected_objects_topic_->header = header; pub_detected_object_->publish(*detected_objects_topic_); detected_objects_topic_ = nullptr; } if (faces_topic_ != nullptr) { - // slog::info << "publishing faces outputs." << slog::endl; faces_topic_->header = header; pub_face_->publish(*faces_topic_); faces_topic_ = nullptr; } if (emotions_topic_ != nullptr) { - // slog::info << "publishing emotions outputs." 
<< slog::endl; emotions_topic_->header = header; pub_emotion_->publish(*emotions_topic_); emotions_topic_ = nullptr; } if (age_gender_topic_ != nullptr) { - // slog::info << "publishing age gender outputs." << slog::endl; age_gender_topic_->header = header; pub_age_gender_->publish(*age_gender_topic_); age_gender_topic_ = nullptr; @@ -390,4 +395,3 @@ void Outputs::RosTopicOutput::handleOutput() headpose_topic_ = nullptr; } } - diff --git a/openvino_wrapper_lib/src/outputs/rviz_output.cpp b/openvino_wrapper_lib/src/outputs/rviz_output.cpp new file mode 100644 index 00000000..6e6b7b8e --- /dev/null +++ b/openvino_wrapper_lib/src/outputs/rviz_output.cpp @@ -0,0 +1,114 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of RvizOutput class + * @file rviz_output.cpp + */ + +#include +#include +#include +#include +#include "cv_bridge/cv_bridge.h" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/rviz_output.hpp" + +Outputs::RvizOutput::RvizOutput(std::string output_name, const rclcpp::Node::SharedPtr node) : BaseOutput(output_name) +{ + if (node != nullptr) { + node_ = node; + } else { + node_ = rclcpp::Node::make_shared(output_name + "_image_publisher"); + } + image_topic_ = nullptr; + pub_image_ = node_->create_publisher("/openvino_toolkit/" + output_name_ + "/images", 16); + image_window_output_ = std::make_shared(output_name_, 950); +} + +void Outputs::RvizOutput::feedFrame(const cv::Mat& frame) +{ + image_window_output_->feedFrame(frame); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector& results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::handleOutput() +{ + image_window_output_->setPipeline(getPipeline()); + 
image_window_output_->decorateFrame(); + cv::Mat frame = image_window_output_->getFrame(); + std_msgs::msg::Header header = getPipeline()->getInputDevice()->getLockedHeader(); + std::shared_ptr cv_ptr = std::make_shared(header, "bgr8", frame); + image_topic_ = cv_ptr->toImageMsg(); + pub_image_->publish(*image_topic_); +} diff --git a/dynamic_vino_lib/src/pipeline.cpp b/openvino_wrapper_lib/src/pipeline.cpp similarity index 69% rename from dynamic_vino_lib/src/pipeline.cpp rename to openvino_wrapper_lib/src/pipeline.cpp index 25ba0f55..e886c838 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/openvino_wrapper_lib/src/pipeline.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,18 +17,19 @@ * @file pipeline.cpp */ -#include +#include #include #include #include #include #include +#include -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" -Pipeline::Pipeline(const std::string & name) +Pipeline::Pipeline(const std::string& name) { if (!name.empty()) { params_ = std::make_shared(name); @@ -36,7 +37,7 @@ Pipeline::Pipeline(const std::string& name) counter_ = 0; } -bool Pipeline::add(const std::string & name, std::shared_ptr input_device) +bool Pipeline::add(const std::string& name, std::shared_ptr input_device) { if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; @@ -50,9 +51,7 @@ bool Pipeline::add(const std::string& name, std::shared_ptr output) +bool Pipeline::add(const std::string& parent, const std::string& name, std::shared_ptr output) { if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || output == nullptr) { slog::err << "ARGuments ERROR when adding output instance!" << slog::endl; @@ -68,7 +67,7 @@ bool Pipeline::add( return false; } -bool Pipeline::add(const std::string & parent, const std::string & name) +bool Pipeline::add(const std::string& parent, const std::string& name) { if (isLegalConnect(parent, name)) { addConnect(parent, name); @@ -78,18 +77,16 @@ bool Pipeline::add(const std::string & parent, const std::string & name) return false; } -bool Pipeline::add(const std::string & name, std::shared_ptr output) +bool Pipeline::add(const std::string& name, std::shared_ptr output) { if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; } - std::map>::iterator it = - name_to_output_map_.find(name); + std::map>::iterator it = name_to_output_map_.find(name); if (it != name_to_output_map_.end()) { - slog::warn << "inferance instance for [" << name << - "] already exists, update it with new instance." << slog::endl; + slog::warn << "inference instance for [" << name << "] already exists, update it with new instance." << slog::endl; } name_to_output_map_[name] = output; output_names_.insert(name); @@ -99,28 +96,23 @@ bool Pipeline::add(const std::string & name, std::shared_ptr::iterator, - std::multimap::iterator> - ret; + std::pair::iterator, std::multimap::iterator> ret; ret = next_.equal_range(parent); for (std::multimap::iterator it = ret.first; it != ret.second; ++it) { if (it->second == name) { - slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << - slog::endl; + slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << slog::endl; return; } } - slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << - slog::endl; - next_.insert({parent, name}); + slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << slog::endl; + next_.insert({ parent, name }); } -bool Pipeline::add( - const std::string & parent, const std::string & name, - std::shared_ptr inference) +bool Pipeline::add(const std::string& parent, const std::string& name, + std::shared_ptr inference) { if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) { slog::err << "ARGuments ERROR when adding inference instance!" << slog::endl; @@ -135,20 +127,17 @@ bool Pipeline::add( return false; } -bool Pipeline::add( - const std::string & name, - std::shared_ptr inference) +bool Pipeline::add(const std::string& name, std::shared_ptr inference) { if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; } - std::map>::iterator it = - name_to_detection_map_.find(name); + std::map>::iterator it = + name_to_detection_map_.find(name); if (it != name_to_detection_map_.end()) { - slog::warn << "inferance instance for [" << name << - "] already exists, update it with new instance." << slog::endl; + slog::warn << "inference instance for [" << name << "] already exists, update it with new instance." << slog::endl; } else { ++total_inference_; } @@ -161,9 +150,9 @@ bool Pipeline::isLegalConnect(const std::string parent, const std::string child) { int parent_order = getCatagoryOrder(parent); int child_order = getCatagoryOrder(child); - slog::info << "Checking connection into pipeline:[" << parent << "(" << parent_order << ")" << - "<-->" << child << "(" << child_order << ")" << - "]" << slog::endl; + slog::info << "Checking connection into pipeline:[" << parent << "(" << parent_order << ")" + << "<-->" << child << "(" << child_order << ")" + << "]" << slog::endl; return (parent_order != kCatagoryOrder_Unknown) && (child_order != kCatagoryOrder_Unknown) && (parent_order <= child_order); } @@ -187,14 +176,12 @@ void Pipeline::runOnce() initInferenceCounter(); if (!input_device_->read(&frame_)) { - // throw std::logic_error("Failed to get frame from cv::VideoCapture"); - // slog::warn << "Failed to get frame from input_device." << slog::endl; - return; //do nothing if now frame read out + return; // do nothing if no frame was read out } width_ = frame_.cols; height_ = frame_.rows; slog::debug << "DEBUG: in Pipeline run process..."
<< slog::endl; - // auto t0 = std::chrono::high_resolution_clock::now(); + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { std::string detection_name = pos.first->second; slog::debug << "DEBUG: Enqueue for detection: " << detection_name << slog::endl; @@ -205,50 +192,47 @@ void Pipeline::runOnce() detection_ptr->submitRequest(); } - for (auto &pair : name_to_output_map_) - { + for (auto& pair : name_to_output_map_) { pair.second->feedFrame(frame_); } countFPS(); slog::debug << "DEBUG: align inference process, waiting until all inferences done!" << slog::endl; std::unique_lock lock(counter_mutex_); - cv_.wait(lock, [self = this]() {return self->counter_ == 0;}); - - //auto t1 = std::chrono::high_resolution_clock::now(); - //typedef std::chrono::duration> ms; + cv_.wait(lock, [self = this]() { return self->counter_ == 0; }); slog::debug << "DEBUG: in Pipeline run process...handleOutput" << slog::endl; - for (auto & pair : name_to_output_map_) { - // slog::info << "Handling Output ..." << pair.first << slog::endl; + for (auto& pair : name_to_output_map_) { pair.second->handleOutput(); } } void Pipeline::printPipeline() { - for (auto & current_node : next_) { + for (auto& current_node : next_) { printf("%s --> %s\n", current_node.first.c_str(), current_node.second.c_str()); } } void Pipeline::setCallback() { - for (auto & pair : name_to_detection_map_) { + for (auto& pair : name_to_detection_map_) { std::string detection_name = pair.first; - std::function callb; - callb = [detection_name, self = this]() - { - self->callback(detection_name); - return; - }; - pair.second->getEngine()->getRequest()->SetCompletionCallback(callb); + std::function callb; + callb = [detection_name, self = this](std::exception_ptr ex) { + if (ex) + throw ex; + + self->callback(detection_name); + return; + }; + pair.second->getEngine()->getRequest().set_callback(callb); } } -void Pipeline::callback(const std::string & detection_name) +void Pipeline::callback(const std::string& detection_name) { - slog::debug <<"Hello callback ----> " << detection_name < " << detection_name << slog::endl; auto detection_ptr = name_to_detection_map_[detection_name]; detection_ptr->fetchResults(); // set output @@ -274,7 +258,7 @@ void Pipeline::callback(const std::string & detection_name) increaseInferenceCounter(); next_detection_ptr->submitRequest(); auto request = next_detection_ptr->getEngine()->getRequest(); - request->Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + request.wait(); } } } @@ -296,14 +280,12 @@ void Pipeline::increaseInferenceCounter() { std::lock_guard lk(counter_mutex_); ++counter_; - // slog::info << "counter = " << counter_ << slog::endl; } void Pipeline::decreaseInferenceCounter() { std::lock_guard lk(counter_mutex_); --counter_; - // slog::info << "counter = " << counter_ << slog::endl; } void Pipeline::countFPS() @@ -312,7 +294,7 @@ void Pipeline::countFPS() auto t_end = std::chrono::high_resolution_clock::now(); typedef std::chrono::duration> ms; ms secondDetection = std::chrono::duration_cast(t_end - t_start_); - if (secondDetection.count() > 1000) { + if (secondDetection.count() > 1000) { setFPS(frame_cnt_); frame_cnt_ = 0; t_start_ = t_end; diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/openvino_wrapper_lib/src/pipeline_manager.cpp similarity index 56% rename from dynamic_vino_lib/src/pipeline_manager.cpp rename to openvino_wrapper_lib/src/pipeline_manager.cpp index 5d348255..58a275ac 100644 --- 
a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/openvino_wrapper_lib/src/pipeline_manager.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,58 +17,61 @@ * @file pipeline_manager.cpp */ -#include +#include #include #include #include #include -#if 0 -#include "dynamic_vino_lib/inferences/landmarks_detection.hpp" -#include "dynamic_vino_lib/inferences/face_reidentification.hpp" -#include "dynamic_vino_lib/models/face_reidentification_model.hpp" -#include "dynamic_vino_lib/models/landmarks_detection_model.hpp" +#if 0 // in current versions, these models are no longer supported. +#include "openvino_wrapper_lib/inferences/landmarks_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_reidentification.hpp" +#include "openvino_wrapper_lib/models/face_reidentification_model.hpp" +#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp" #endif -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/models/license_plate_detection_model.hpp" -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/license_plate_detection.hpp" -#include "dynamic_vino_lib/inferences/person_reidentification.hpp" -#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/models/face_detection_model.hpp" -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "dynamic_vino_lib/models/object_detection_yolov2_model.hpp" -#include "dynamic_vino_lib/models/object_detection_ssd_model.hpp" -#include "dynamic_vino_lib/inferences/object_segmentation.hpp" -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" -#include "dynamic_vino_lib/inputs/standard_camera.hpp" -#include "dynamic_vino_lib/inputs/ip_camera.hpp" -#include "dynamic_vino_lib/inputs/video_input.hpp" -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/outputs/rviz_output.hpp" -#include "dynamic_vino_lib/outputs/ros_service_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/pipeline_params.hpp" -#include "dynamic_vino_lib/services/pipeline_processing_server.hpp" -#include "dynamic_vino_lib/engines/engine_manager.hpp" -std::shared_ptr -PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData & params, - rclcpp::Node::SharedPtr node) +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include
"openvino_wrapper_lib/models/license_plate_detection_model.hpp" +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp" +#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp" +#include "openvino_wrapper_lib/inferences/person_reidentification.hpp" +#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/models/face_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp" +#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/emotions_detection.hpp" +#include "openvino_wrapper_lib/models/emotion_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp" +#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp" +#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp" +#include "openvino_wrapper_lib/models/object_detection_yolov8_model.hpp" +#include "openvino_wrapper_lib/models/object_detection_ssd_model.hpp" +#include "openvino_wrapper_lib/inferences/object_segmentation.hpp" +#include "openvino_wrapper_lib/inferences/object_segmentation_instance.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_instance_model.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_instance_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/inputs/realsense_camera.hpp" +#include "openvino_wrapper_lib/inputs/realsense_camera_topic.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/inputs/ip_camera.hpp" +#include "openvino_wrapper_lib/inputs/video_input.hpp" +#include "openvino_wrapper_lib/outputs/image_window_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp" +#include "openvino_wrapper_lib/outputs/rviz_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_service_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp" +#include "openvino_wrapper_lib/engines/engine_manager.hpp" +std::shared_ptr PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData& params, + rclcpp::Node::SharedPtr node) { if (params.name == "") { throw std::logic_error("The name of pipeline won't be empty!"); @@ -111,10 +114,7 @@ PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData & pa pipeline->add(it->first, it->second); } - // slog::info << "Updateing filters ..." << slog::endl; - // pipeline->addFilters(params.filters); - - pipelines_.insert({params.name, data}); + pipelines_.insert({ params.name, data }); pipeline->setCallback(); slog::info << "One Pipeline Created!" 
<< slog::endl; @@ -123,16 +123,16 @@ PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData & pa } std::map> -PipelineManager::parseInputDevice(const PipelineData & pdata) +PipelineManager::parseInputDevice(const PipelineData& pdata) { std::map> inputs; - for (auto & name : pdata.params.inputs) { + for (auto& name : pdata.params.inputs) { slog::info << "Parsing InputDvice: " << name << slog::endl; std::shared_ptr device = nullptr; if (name == kInputType_RealSenseCamera) { device = std::make_shared(); } else if (name == kInputType_StandardCamera) { - device = std::make_shared(); + device = std::make_shared(pdata.params.input_meta); } else if (name == kInputType_IpCamera) { if (pdata.params.input_meta != "") { device = std::make_shared(pdata.params.input_meta); @@ -153,7 +153,7 @@ PipelineManager::parseInputDevice(const PipelineData & pdata) if (device != nullptr) { device->initialize(); - inputs.insert({name, device}); + inputs.insert({ name, device }); slog::info << " ... Adding one Input device: " << name << slog::endl; } } @@ -161,12 +161,10 @@ PipelineManager::parseInputDevice(const PipelineData & pdata) return inputs; } - -std::map> -PipelineManager::parseOutput(const PipelineData & pdata) +std::map> PipelineManager::parseOutput(const PipelineData& pdata) { std::map> outputs; - for (auto & name : pdata.params.outputs) { + for (auto& name : pdata.params.outputs) { slog::info << "Parsing Output: " << name << slog::endl; std::shared_ptr object = nullptr; if (name == kOutputTpye_RosTopic) { @@ -181,7 +179,7 @@ PipelineManager::parseOutput(const PipelineData & pdata) slog::err << "Invalid output name: " << name << slog::endl; } if (object != nullptr) { - outputs.insert({name, object}); + outputs.insert({ name, object }); slog::info << " ... 
Adding one Output: " << name << slog::endl; } } @@ -189,16 +187,16 @@ PipelineManager::parseOutput(const PipelineData & pdata) return outputs; } -std::map> -PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & params) +std::map> +PipelineManager::parseInference(const Params::ParamManager::PipelineRawData& params) { - std::map> inferences; - for (auto & infer : params.infers) { + std::map> inferences; + for (auto& infer : params.infers) { if (infer.name.empty() || infer.model.empty()) { continue; } slog::info << "Parsing Inference: " << infer.name << slog::endl; - std::shared_ptr object = nullptr; + std::shared_ptr object = nullptr; if (infer.name == kInferTpye_FaceDetection) { object = createFaceDetection(infer); @@ -212,6 +210,10 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa object = createObjectDetection(infer); } else if (infer.name == kInferTpye_ObjectSegmentation) { object = createObjectSegmentation(infer); + } else if (infer.name == kInferTpye_ObjectSegmentationMaskrcnn) { + object = createObjectSegmentationMaskrcnn(infer); + } else if (infer.name == kInferTpye_ObjectSegmentationInstance) { + object = createObjectSegmentationInstance(infer); } else if (infer.name == kInferTpye_PersonReidentification) { object = createPersonReidentification(infer); } else if (infer.name == kInferTpye_PersonAttribsDetection) { @@ -220,16 +222,17 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa object = createLandmarksDetection(infer); } else if (infer.name == kInferTpye_FaceReidentification) { object = createFaceReidentification(infer); - } */ else if (infer.name == kInferTpye_VehicleAttribsDetection) { + } */ + else if (infer.name == kInferTpye_VehicleAttribsDetection) { object = createVehicleAttribsDetection(infer); } else if (infer.name == kInferTpye_LicensePlateDetection) { object = createLicensePlateDetection(infer); - }else { + } else { slog::err << "Invalid inference name: " << infer.name << slog::endl; } if (object != nullptr) { - inferences.insert({infer.name, object}); + inferences.insert({ infer.name, object }); slog::info << " ... 
Adding one Inference: " << infer.name << slog::endl; } } @@ -237,77 +240,76 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa return inferences; } - -std::shared_ptr -PipelineManager::createFaceDetection( - const Params::ParamManager::InferenceRawData & infer) +std::shared_ptr +PipelineManager::createFaceDetection(const Params::ParamManager::InferenceRawData& infer) { return createObjectDetection(infer); } -std::shared_ptr -PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & param) +std::shared_ptr +PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); return infer; } -std::shared_ptr -PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData & param) +std::shared_ptr +PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); return infer; } -std::shared_ptr -PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & param) +std::shared_ptr +PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); return infer; } - -std::shared_ptr -PipelineManager::createObjectDetection( - const Params::ParamManager::InferenceRawData & infer) +std::shared_ptr +PipelineManager::createObjectDetection(const Params::ParamManager::InferenceRawData& infer) { std::shared_ptr object_detection_model; - std::shared_ptr object_inference_ptr; + std::shared_ptr object_inference_ptr; slog::debug << "for test in createObjectDetection()" << slog::endl; if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) { + object_detection_model = std::make_shared(infer.label, infer.model, infer.batch); + } + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov5) { object_detection_model = - std::make_shared(infer.label, infer.model, infer.batch); + std::make_shared(infer.label, infer.model, infer.batch); } - if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2) { + + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov8) { object_detection_model = - std::make_shared(infer.label, infer.model, infer.batch); + std::make_shared(infer.label, infer.model, infer.batch); } slog::debug << "for test in createObjectDetection(), Created SSDModel" << slog::endl; - object_inference_ptr = std::make_shared( - infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration + object_inference_ptr = std::make_shared( + infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration slog::debug << "for test in createObjectDetection(), before modelInit()" << slog::endl; 
   object_detection_model->modelInit();
-  auto object_detection_engine = engine_manager_.createEngine(
-    infer.engine, object_detection_model);
+  auto object_detection_engine = engine_manager_.createEngine(infer.engine, object_detection_model);
   slog::debug << "for test in createObjectDetection(), before loadNetwork" << slog::endl;
   object_inference_ptr->loadNetwork(object_detection_model);
   object_inference_ptr->loadEngine(object_detection_engine);
@@ -315,89 +317,121 @@ PipelineManager::createObjectDetection(
   return object_inference_ptr;
 }

-std::shared_ptr
-PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer)
+std::shared_ptr
+PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData& infer)
 {
-  auto model =
-    std::make_shared(infer.label, infer.model, infer.batch);
+  auto model = std::make_shared(infer.label, infer.model, infer.batch);
   model->modelInit();
   slog::info << "Segmentation model initialized." << slog::endl;
   auto engine = engine_manager_.createEngine(infer.engine, model);
   slog::info << "Segmentation Engine initialized." << slog::endl;
-  auto segmentation_inference_ptr = std::make_shared(
-    infer.confidence_threshold);
-  slog::info << "Segmentation Inference instanced." << slog::endl;
+  auto segmentation_inference_ptr =
+      std::make_shared(infer.confidence_threshold);
+  slog::info << "Segmentation Inference instanced." << slog::endl;
   segmentation_inference_ptr->loadNetwork(model);
   segmentation_inference_ptr->loadEngine(engine);

   return segmentation_inference_ptr;
 }

-std::shared_ptr
-PipelineManager::createPersonReidentification(
-  const Params::ParamManager::InferenceRawData & infer)
+// TODO: Deprecated
+std::shared_ptr
+PipelineManager::createObjectSegmentationMaskrcnn(const Params::ParamManager::InferenceRawData& infer)
+{
+  auto model = std::make_shared(infer.label, infer.model, infer.batch);
+  model->modelInit();
+  slog::info << "Segmentation model initialized." << slog::endl;
+  auto engine = engine_manager_.createEngine(infer.engine, model);
+  slog::info << "Segmentation Engine initialized." << slog::endl;
+  auto segmentation_inference_ptr =
+      std::make_shared(infer.confidence_threshold);
+  slog::info << "Segmentation Inference instanced." << slog::endl;
+  segmentation_inference_ptr->loadNetwork(model);
+  segmentation_inference_ptr->loadEngine(engine);
+
+  return segmentation_inference_ptr;
+}
+
+std::shared_ptr
+PipelineManager::createObjectSegmentationInstance(const Params::ParamManager::InferenceRawData& infer)
+{
+  std::shared_ptr model;
+  if (infer.model_type == kInferTpye_ObjectSegmentationTypeMaskrcnn) {
+    slog::info << "Model Type: kInferType_ObjectSegmentationTypeMaskrcnn" << slog::endl;
+    model = std::make_shared(infer.label, infer.model, infer.batch);
+  } else {
+    slog::info << "Model Type: kInferType_ObjectSegmentationTypeYolo" << slog::endl;
+    model = std::make_shared(infer);
+  }
+  model->modelInit();
+  slog::info << "Instance Segmentation model initialized." << slog::endl;
+  auto engine = engine_manager_.createEngine(infer.engine, model);
+  slog::info << "Engine initialized for Instance Segmentation." << slog::endl;
+  auto segmentation_inference_ptr =
+      std::make_shared(infer.confidence_threshold);
+  slog::info << "Segmentation Inference instanced."
<< slog::endl;
+  segmentation_inference_ptr->loadNetwork(model);
+  segmentation_inference_ptr->loadEngine(engine);
+
+  return segmentation_inference_ptr;
+}
+
+std::shared_ptr
+PipelineManager::createPersonReidentification(const Params::ParamManager::InferenceRawData& infer)
 {
   std::shared_ptr person_reidentification_model;
-  std::shared_ptr reidentification_inference_ptr;
-  slog::debug << "for test in createPersonReidentification()"<<slog::endl;
+  std::shared_ptr reidentification_inference_ptr;
+  slog::debug << "for test in createPersonReidentification()" << slog::endl;
   person_reidentification_model =
-    std::make_shared(infer.label, infer.model, infer.batch);
+      std::make_shared(infer.label, infer.model, infer.batch);
   person_reidentification_model->modelInit();
   slog::info << "Reidentification model initialized" << slog::endl;
   auto person_reidentification_engine =
     engine_manager_.createEngine(infer.engine, person_reidentification_model);
   reidentification_inference_ptr =
-    std::make_shared(infer.confidence_threshold);
-  slog::debug<< "for test in createPersonReidentification(), before loadNetwork"<<slog::endl;
+      std::make_shared(infer.confidence_threshold);
+  slog::debug << "for test in createPersonReidentification(), before loadNetwork" << slog::endl;
   reidentification_inference_ptr->loadNetwork(person_reidentification_model);
   reidentification_inference_ptr->loadEngine(person_reidentification_engine);
-  slog::debug<< "for test in createPersonReidentification(), OK"<<slog::endl;
+  slog::debug << "for test in createPersonReidentification(), OK" << slog::endl;

   return reidentification_inference_ptr;
 }

-std::shared_ptr
-PipelineManager::createVehicleAttribsDetection(
-  const Params::ParamManager::InferenceRawData & infer)
+std::shared_ptr
+PipelineManager::createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData& infer)
 {
-  auto model =
-    std::make_shared(infer.label, infer.model, infer.batch);
+  auto model = std::make_shared(infer.label, infer.model, infer.batch);
   model->modelInit();
   auto engine = engine_manager_.createEngine(infer.engine, model);
-  auto vehicle_attribs_ptr =
-    std::make_shared();
+  auto vehicle_attribs_ptr = std::make_shared();
   vehicle_attribs_ptr->loadNetwork(model);
   vehicle_attribs_ptr->loadEngine(engine);

   return vehicle_attribs_ptr;
 }

-std::shared_ptr
-PipelineManager::createLicensePlateDetection(
-  const Params::ParamManager::InferenceRawData & infer)
+std::shared_ptr
+PipelineManager::createLicensePlateDetection(const Params::ParamManager::InferenceRawData& infer)
 {
-  auto model =
-    std::make_shared(infer.label, infer.model, infer.batch);
+  auto model = std::make_shared(infer.label, infer.model, infer.batch);
   model->modelInit();
   auto engine = engine_manager_.createEngine(infer.engine, model);
-  auto license_plate_ptr =
-    std::make_shared();
+  auto license_plate_ptr = std::make_shared();
   license_plate_ptr->loadNetwork(model);
   license_plate_ptr->loadEngine(engine);

   return license_plate_ptr;
 }

-std::shared_ptr
-PipelineManager::createPersonAttribsDetection(
-  const Params::ParamManager::InferenceRawData & infer)
+std::shared_ptr
+PipelineManager::createPersonAttribsDetection(const Params::ParamManager::InferenceRawData& infer)
 {
-  auto model =
-    std::make_shared(infer.label, infer.model, infer.batch);
-  slog::debug << "for test in createPersonAttributesDetection()"<<slog::endl;
+  auto model = std::make_shared(infer.label, infer.model, infer.batch);
+  slog::debug << "for test in createPersonAttributesDetection()" << slog::endl;
   model->modelInit();
   auto engine = engine_manager_.createEngine(infer.engine, model);
   auto attribs_inference_ptr =
-    std::make_shared(infer.confidence_threshold);
+      std::make_shared(infer.confidence_threshold);
   attribs_inference_ptr->loadNetwork(model);
attribs_inference_ptr->loadEngine(engine); @@ -405,7 +439,7 @@ PipelineManager::createPersonAttribsDetection( } #if 0 -std::shared_ptr +std::shared_ptr PipelineManager::createPersonReidentification( const Params::ParamManager::InferenceRawData & infer) { @@ -414,14 +448,14 @@ PipelineManager::createPersonReidentification( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto reidentification_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); reidentification_inference_ptr->loadNetwork(model); reidentification_inference_ptr->loadEngine(engine); return reidentification_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createPersonAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -431,14 +465,14 @@ PipelineManager::createPersonAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto attribs_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); attribs_inference_ptr->loadNetwork(model); attribs_inference_ptr->loadEngine(engine); return attribs_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createLandmarksDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -447,14 +481,14 @@ PipelineManager::createLandmarksDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto landmarks_inference_ptr = - std::make_shared(); + std::make_shared(); landmarks_inference_ptr->loadNetwork(model); landmarks_inference_ptr->loadEngine(engine); return landmarks_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createFaceReidentification( const Params::ParamManager::InferenceRawData & infer) { @@ -463,14 +497,14 @@ PipelineManager::createFaceReidentification( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto face_reid_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); face_reid_ptr->loadNetwork(model); face_reid_ptr->loadEngine(engine); return face_reid_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createVehicleAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -479,14 +513,14 @@ PipelineManager::createVehicleAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto vehicle_attribs_ptr = - std::make_shared(); + std::make_shared(); vehicle_attribs_ptr->loadNetwork(model); vehicle_attribs_ptr->loadEngine(engine); return vehicle_attribs_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createLicensePlateDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -495,7 +529,7 @@ PipelineManager::createLicensePlateDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto license_plate_ptr = - std::make_shared(); + std::make_shared(); license_plate_ptr->loadNetwork(model); license_plate_ptr->loadEngine(engine); @@ -503,9 +537,9 @@ PipelineManager::createLicensePlateDetection( } #endif -void PipelineManager::threadPipeline(const char * name) +void PipelineManager::threadPipeline(const char* name) { - PipelineData & p = pipelines_[name]; + PipelineData& p = pipelines_[name]; while (p.state != PipelineState_ThreadStopped && p.pipeline != nullptr) { if (p.state == PipelineState_ThreadRunning) { p.pipeline->runOnce(); @@ -513,11 +547,11 @@ void 
PipelineManager::threadPipeline(const char * name)
     std::this_thread::sleep_for(std::chrono::microseconds(1));
   }
 }
-void PipelineManager::threadSpinNodes(const char * name)
+void PipelineManager::threadSpinNodes(const char* name)
 {
-  PipelineData & p = pipelines_[name];
+  PipelineData& p = pipelines_[name];
   while (p.state != PipelineState_ThreadStopped && p.pipeline != nullptr) {
-    for (auto & node : p.spin_nodes) {
+    for (auto& node : p.spin_nodes) {
       rclcpp::spin_some(node);
     }
     std::this_thread::sleep_for(std::chrono::microseconds(1));
@@ -534,10 +568,10 @@ void PipelineManager::runAll()
       service_.state = PipelineState_ThreadRunning;
     }
     if (it->second.thread == nullptr) {
-      it->second.thread = std::make_shared<std::thread>(&PipelineManager::threadPipeline, this,
-        it->second.params.name.c_str());
+      it->second.thread =
+        std::make_shared<std::thread>(&PipelineManager::threadPipeline, this, it->second.params.name.c_str());
     }
-    #if 0 //DEBUGING
+#if 0 // DEBUGGING
     // Consider of saving CPU loads, the spin thread is moved out from pipeline manager,
     // which is supposed to be handled by the upper-level applications.
     // (see @file pipeline_with_params.cpp for the calling sample.)
@@ -549,14 +583,14 @@ void PipelineManager::runAll()
         &PipelineManager::threadSpinNodes, this, it->second.params.name.c_str());
     }
-    #endif
+#endif
   }
 }

 void PipelineManager::runService()
 {
-  auto node = std::make_shared>("pipeline_service");
+  auto node =
+    std::make_shared>("pipeline_service");
   while (service_.state != PipelineState_ThreadStopped && service_.thread != nullptr) {
     rclcpp::spin_some(node);
     std::this_thread::sleep_for(std::chrono::milliseconds(1));
@@ -585,9 +619,7 @@ void PipelineManager::joinAll()
     if (it->second.thread != nullptr && it->second.state == PipelineState_ThreadRunning) {
       it->second.thread->join();
     }
-    if (it->second.thread_spin_nodes != nullptr &&
-      it->second.state == PipelineState_ThreadRunning)
-    {
+    if (it->second.thread_spin_nodes != nullptr && it->second.state == PipelineState_ThreadRunning) {
       it->second.thread_spin_nodes->join();
     }
   }
diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/openvino_wrapper_lib/src/pipeline_params.cpp
similarity index 76%
rename from dynamic_vino_lib/src/pipeline_params.cpp
rename to openvino_wrapper_lib/src/pipeline_params.cpp
index 9649e684..e9c25353 100644
--- a/dynamic_vino_lib/src/pipeline_params.cpp
+++ b/openvino_wrapper_lib/src/pipeline_params.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
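The runAll()/threadPipeline() hunks above keep the original threading model: one worker per pipeline polls a shared state flag and sleeps briefly between iterations; joinAll() relies on workers observing the stopped state. A standalone sketch of that loop (state names mirror the patch, including its misspelled "Pasued" enumerator; the std::atomic wrapper is an addition so the sketch itself is race-free):

#include <atomic>
#include <chrono>
#include <thread>

enum PipelineState {
  PipelineState_ThreadStopped,
  PipelineState_ThreadRunning,
  PipelineState_ThreadPasued  // sic, spelled as in the patch
};

int main()
{
  std::atomic<PipelineState> state{PipelineState_ThreadRunning};

  // Mirrors PipelineManager::threadPipeline(): loop until stopped, do work
  // only while running, and yield for a microsecond each iteration.
  std::thread worker([&] {
    while (state != PipelineState_ThreadStopped) {
      if (state == PipelineState_ThreadRunning) {
        // p.pipeline->runOnce() would be called here
      }
      std::this_thread::sleep_for(std::chrono::microseconds(1));
    }
  });

  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  state = PipelineState_ThreadStopped;  // what joinAll() waits on, in effect
  worker.join();
  return 0;
}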
@@ -17,23 +17,23 @@ * @file pipeline.cpp */ -#include +#include #include #include #include -#include "dynamic_vino_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" -PipelineParams::PipelineParams(const std::string & name) +PipelineParams::PipelineParams(const std::string& name) { params_.name = name; } -PipelineParams::PipelineParams(const Params::ParamManager::PipelineRawData & params) +PipelineParams::PipelineParams(const Params::ParamManager::PipelineRawData& params) { params_ = params; } -PipelineParams & PipelineParams::operator=(const Params::ParamManager::PipelineRawData & params) +PipelineParams& PipelineParams::operator=(const Params::ParamManager::PipelineRawData& params) { params_.name = params.name; params_.infers = params.infers; @@ -44,7 +44,7 @@ PipelineParams & PipelineParams::operator=(const Params::ParamManager::PipelineR return *this; } -Params::ParamManager::PipelineRawData PipelineParams::getPipeline(const std::string & name) +Params::ParamManager::PipelineRawData PipelineParams::getPipeline(const std::string& name) { return Params::ParamManager::getInstance().getPipeline(name); } @@ -56,12 +56,12 @@ void PipelineParams::update() } } -void PipelineParams::update(const Params::ParamManager::PipelineRawData & params) +void PipelineParams::update(const Params::ParamManager::PipelineRawData& params) { params_ = params; } -bool PipelineParams::isOutputTo(std::string & output) +bool PipelineParams::isOutputTo(std::string& output) { if (std::find(params_.outputs.begin(), params_.outputs.end(), output) != params_.outputs.end()) { return true; @@ -75,12 +75,10 @@ bool PipelineParams::isGetFps() if (params_.inputs.size() == 0) { return false; } - return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == - params_.inputs.end(); + return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == params_.inputs.end(); } -std::string PipelineParams::findFilterConditions( - const std::string & input, const std::string & output) +std::string PipelineParams::findFilterConditions(const std::string& input, const std::string& output) { for (auto filter : params_.filters) { if (!input.compare(filter.input) && !output.compare(filter.output)) { diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/openvino_wrapper_lib/src/services/frame_processing_server.cpp similarity index 59% rename from dynamic_vino_lib/src/services/frame_processing_server.cpp rename to openvino_wrapper_lib/src/services/frame_processing_server.cpp index 5a227798..671c76e5 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/openvino_wrapper_lib/src/services/frame_processing_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
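The frame_processing_server.cpp changes just below keep the file's class template over the service type: the constructor registers the handler via create_service, and the template is instantiated explicitly at the end of the file. A compilable sketch of that shape, using std_srvs::srv::Trigger as a stand-in for the openvino service types (an assumption for illustration; MiniProcessingServer is a hypothetical name):

#include <functional>
#include <memory>
#include <rclcpp/rclcpp.hpp>
#include <std_srvs/srv/trigger.hpp>

template <typename T>
class MiniProcessingServer : public rclcpp::Node
{
public:
  explicit MiniProcessingServer(const std::string & service_name)
  : Node("node_with_service")
  {
    // Same registration shape as FrameProcessingServer::initService().
    service_ = this->create_service<T>(
      service_name,
      std::bind(&MiniProcessingServer::cbService, this,
                std::placeholders::_1, std::placeholders::_2));
  }

private:
  void cbService(const std::shared_ptr<typename T::Request> /*request*/,
                 std::shared_ptr<typename T::Response> response)
  {
    response->success = true;  // a real server would run the pipeline here
  }

  typename rclcpp::Service<T>::SharedPtr service_;
};

// Explicit instantiation, mirroring "template class FrameProcessingServer<...>;"
template class MiniProcessingServer<std_srvs::srv::Trigger>;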
-#include "dynamic_vino_lib/services/frame_processing_server.hpp" -#include +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" +#include #include #include -#include +#include #include #include #include @@ -25,28 +25,23 @@ #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/slog.hpp" namespace vino_service { -template -FrameProcessingServer::FrameProcessingServer( - const std::string & service_name, - const std::string & config_path) -: Node("node_with_service"), - service_name_(service_name), - config_path_(config_path) +template +FrameProcessingServer::FrameProcessingServer(const std::string& service_name, const std::string& config_path) + : Node("node_with_service"), service_name_(service_name), config_path_(config_path) { initService(config_path); } -template -void FrameProcessingServer::initService( - const std::string & config_path) +template +void FrameProcessingServer::initService(const std::string& config_path) { Params::ParamManager::getInstance().parse(config_path); Params::ParamManager::getInstance().print(); @@ -56,24 +51,21 @@ void FrameProcessingServer::initService( throw std::logic_error("1 and only 1 pipeline can be set to FrameProcessServer!"); } - for (auto & p : pipelines) { + for (auto& p : pipelines) { PipelineManager::getInstance().createPipeline(p); } - service_ = create_service("/openvino_toolkit/service", - std::bind(&FrameProcessingServer::cbService, this, - std::placeholders::_1, std::placeholders::_2)); + service_ = create_service("/openvino_toolkit/service", std::bind(&FrameProcessingServer::cbService, this, + std::placeholders::_1, std::placeholders::_2)); } -template -void FrameProcessingServer::cbService( - const std::shared_ptr request, - std::shared_ptr response) +template +void FrameProcessingServer::cbService(const std::shared_ptr request, + std::shared_ptr response) { - std::map pipelines_ = - PipelineManager::getInstance().getPipelines(); + std::map pipelines_ = PipelineManager::getInstance().getPipelines(); for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; + PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; auto input = p.pipeline->getInputDevice(); Input::Config config; config.path = request->image_path; @@ -81,7 +73,7 @@ void FrameProcessingServer::cbService( p.pipeline->runOnce(); auto output_handle = p.pipeline->getOutputHandle(); - for (auto & pair : output_handle) { + for (auto& pair : output_handle) { if (!pair.first.compare(kOutputTpye_RosService)) { pair.second->setServiceResponse(response); pair.second->clearData(); @@ -93,5 +85,5 @@ void FrameProcessingServer::cbService( } template class FrameProcessingServer; -template class FrameProcessingServer; +template class FrameProcessingServer; } // namespace vino_service diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/openvino_wrapper_lib/src/services/pipeline_processing_server.cpp similarity index 58% rename from dynamic_vino_lib/src/services/pipeline_processing_server.cpp rename to 
openvino_wrapper_lib/src/services/pipeline_processing_server.cpp
index c5beeb97..31b1bde9 100644
--- a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp
+++ b/openvino_wrapper_lib/src/services/pipeline_processing_server.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "dynamic_vino_lib/services/pipeline_processing_server.hpp"
+#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp"
 #include
-#include
+#include
 #include
 #include
 #include
@@ -23,42 +23,39 @@
 #include
 #include

-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/slog.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/slog.hpp"

 namespace vino_service
 {
-template<typename T>
-PipelineProcessingServer<T>::PipelineProcessingServer(
-  const std::string & service_name)
-: Node(service_name),
-  service_name_(service_name)
+template <typename T>
+PipelineProcessingServer<T>::PipelineProcessingServer(const std::string& service_name)
+  : Node(service_name), service_name_(service_name)
 {
   pipelines_ = PipelineManager::getInstance().getPipelinesPtr();
   initPipelineService();
 }

-template<typename T>
+template <typename T>
 void PipelineProcessingServer<T>::initPipelineService()
 {
-  service_ = create_service<T>("/openvino_toolkit/pipeline_service",
-    std::bind(&PipelineProcessingServer::cbService, this,
-      std::placeholders::_1, std::placeholders::_2));
+  service_ =
+    create_service<T>("/openvino_toolkit/pipeline_service", std::bind(&PipelineProcessingServer::cbService, this,
+                                                                      std::placeholders::_1, std::placeholders::_2));
 }

-template<typename T>
-void PipelineProcessingServer<T>::setResponse(
-  std::shared_ptr<typename T::Response> response)
+template <typename T>
+void PipelineProcessingServer<T>::setResponse(std::shared_ptr<typename T::Response> response)
 {
   for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) {
-    pipeline_srv_msgs::msg::Pipeline pipeline_msg;
+    openvino_msgs::msg::Pipeline pipeline_msg;
     pipeline_msg.name = it->first;
     pipeline_msg.running_status = std::to_string(it->second.state);
     auto connection_map = it->second.pipeline->getPipelineDetail();
-    for (auto & current_pipe : connection_map) {
-      pipeline_srv_msgs::msg::Connection connection;
+    for (auto& current_pipe : connection_map) {
+      openvino_msgs::msg::Connection connection;
       connection.input = current_pipe.first.c_str();
       connection.output = current_pipe.second.c_str();
       pipeline_msg.connections.push_back(connection);
@@ -66,10 +63,8 @@ void PipelineProcessingServer::setResponse(
     response->pipelines.push_back(pipeline_msg);
   }
 }
-template<typename T>
-void PipelineProcessingServer<T>::setPipelineByRequest(
-  std::string pipeline_name,
-  PipelineManager::PipelineState state)
+template <typename T>
+void PipelineProcessingServer<T>::setPipelineByRequest(std::string pipeline_name, PipelineManager::PipelineState state)
 {
   for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) {
     if (pipeline_name == it->first) {
@@ -79,15 +74,14 @@ void PipelineProcessingServer::setPipelineByRequest(
   }
 }

-template<typename T>
-void PipelineProcessingServer<T>::cbService(
-  const std::shared_ptr<typename T::Request> request,
-  std::shared_ptr<typename T::Response> response)
+template <typename T>
+void PipelineProcessingServer<T>::cbService(const std::shared_ptr<typename T::Request> request,
+                                            std::shared_ptr<typename T::Response> response)
 {
std::string req_cmd = request->pipeline_request.cmd; std::string req_val = request->pipeline_request.value; - slog::info << "[PipelineProcessingServer] Pipeline Service get request cmd: " << req_cmd << - " val:" << req_val << slog::endl; + slog::info << "[PipelineProcessingServer] Pipeline Service get request cmd: " << req_cmd << " val:" << req_val + << slog::endl; // Todo set initial state by current state PipelineManager::PipelineState state = PipelineManager::PipelineState_ThreadRunning; if (req_cmd != "GET_PIPELINE") { @@ -95,10 +89,12 @@ void PipelineProcessingServer::cbService( state = PipelineManager::PipelineState_ThreadStopped; } else if (req_cmd == "RUN_PIPELINE") { state = PipelineManager::PipelineState_ThreadRunning; - } else if (req_cmd == "PAUSE_PIPELINE") {state = PipelineManager::PipelineState_ThreadPasued;} + } else if (req_cmd == "PAUSE_PIPELINE") { + state = PipelineManager::PipelineState_ThreadPasued; + } setPipelineByRequest(req_val, state); } setResponse(response); } -template class PipelineProcessingServer; +template class PipelineProcessingServer; } // namespace vino_service diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index 656f0a19..25f50b09 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -9,14 +9,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +set(OpenVINO_LIBRARIES openvino::runtime) cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_sample) +project(openvino_node) -# Default to C++14 +# Default to C++17 if(NOT CMAKE_CXX_STANDARD) - set(CMAKE_CXX_STANDARD 14) + set(CMAKE_CXX_STANDARD 17) endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) @@ -30,19 +31,17 @@ else() endif() set(CMAKE_CXX_FLAGS "-fPIE -fPIC -D_FORTIFY_SOURCE=2 -fstack-protector -Wformat -Wformat-security -Wall ${CMAKE_CXX_FLAGS}") - find_package(ament_cmake REQUIRED) find_package(ament_index_cpp REQUIRED) find_package(rclcpp REQUIRED) find_package(rcutils) find_package(OpenCV REQUIRED) find_package(cv_bridge REQUIRED) -find_package(InferenceEngine REQUIRED) -find_package(dynamic_vino_lib REQUIRED) +find_package(OpenVINO REQUIRED) +find_package(openvino_wrapper_lib REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_msgs REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) find_package(realsense2 REQUIRED) find_package(rclcpp_components) @@ -97,21 +96,12 @@ source_group("include" FILES ${MAIN_HEADERS}) include_directories(${OpenCV_INCLUDE_DIRS}) include_directories(${PROJECT_SOURCE_DIR}/include) -include_directories(${dynamic_vino_lib_INCLUDE_DIRS}) -include_directories(${vino_param_lib_INCLUDE_DIRS}) -include_directories(${InferenceEngine_INCLUDE_DIRS}) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/extension) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../src) 
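Stepping back to cbService at the top of this region: the handler maps the request's command string to a pipeline state through an if/else chain. A table-driven equivalent (a sketch of the same dispatch, not a drop-in replacement; enum names abbreviated):

#include <iostream>
#include <map>
#include <string>

enum PipelineState { ThreadStopped, ThreadRunning, ThreadPaused };

// Same mapping as the if/else chain in cbService(); unknown commands keep
// the default running state, as the patch does.
PipelineState stateFor(const std::string & cmd)
{
  static const std::map<std::string, PipelineState> table = {
    { "STOP_PIPELINE", ThreadStopped },
    { "RUN_PIPELINE", ThreadRunning },
    { "PAUSE_PIPELINE", ThreadPaused },
  };
  const auto it = table.find(cmd);
  return it == table.end() ? ThreadRunning : it->second;
}

int main()
{
  std::cout << stateFor("PAUSE_PIPELINE") << "\n";  // prints 2
  return 0;
}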
-#include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/build/thirdparty/gflags/include) -#include_directories(${InferenceEngine_INCLUDE_DIRS}/../build/samples/thirdparty/gflags/include) - +include_directories(${openvino_wrapper_lib_INCLUDE_DIRS}) +include_directories(${openvino_param_lib_INCLUDE_DIRS}) +include_directories(${OpenVINO_DIRS}) include_directories(${realsense2_INCLUDE_DIRS}) -#include_directories (/opt/ros2_openvino/include) # Create library file from sources. -#add_executable(${PROJECT_NAME} ${MAIN_SRC} ${MAIN_HEADERS}) - if(UNIX) set(LIB_DL dl) endif() @@ -124,8 +114,8 @@ target_link_libraries(vino_param_sample ) ament_target_dependencies(vino_param_sample - "vino_param_lib" - "dynamic_vino_lib" + "openvino_param_lib" + "openvino_wrapper_lib" "yaml_cpp_vendor" ) @@ -142,11 +132,10 @@ ament_target_dependencies(pipeline_with_params "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" "yaml_cpp_vendor" "realsense2" @@ -166,19 +155,15 @@ ament_target_dependencies(composable_pipeline "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" "yaml_cpp_vendor" "realsense2" ) rclcpp_components_register_nodes(composable_pipeline "ComposablePipeline") -#set(node_plugins "") -#set(node_plugins "${node_plugins}ComposablePipeline;$\n") - add_executable(image_object_server src/image_object_server.cpp @@ -193,11 +178,10 @@ ament_target_dependencies(image_object_server "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -214,11 +198,10 @@ ament_target_dependencies(image_people_server "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -235,11 +218,10 @@ ament_target_dependencies(image_object_client "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -256,11 +238,10 @@ ament_target_dependencies(image_people_client "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) diff --git a/sample/include/utility.hpp b/sample/include/utility.hpp index f0a302de..06674adb 100644 --- a/sample/include/utility.hpp +++ b/sample/include/utility.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -34,24 +34,24 @@ static const char help_message[] = "Print a usage message.";
 static const char parameter_file_message[] = "Absolute path of parameter config file.";

 /**
-* \brief This function show a help message
-*/
+ * \brief This function shows a help message
+ */
 static void showUsageForParam(const std::string prog)
 {
   std::cout << std::endl;
-  std::cout << prog <<" [OPTION]" << std::endl;
+  std::cout << prog << " [OPTION]" << std::endl;
   std::cout << "Options:" << std::endl;
   std::cout << std::endl;
   std::cout << "    -h              " << help_message << std::endl;
   std::cout << "    -config \"\"    " << parameter_file_message << std::endl;
 }

-static std::string getConfigPath(int argc, char * argv[])
+static std::string getConfigPath(int argc, char* argv[])
 {
-  for(int i = 1; i < argc - 1; i++){
+  for (int i = 1; i < argc - 1; i++) {
     std::string arg = argv[i];
-    if(arg == "-config" || arg == "--config"){
-      return argv[i+1];
+    if (arg == "-config" || arg == "--config") {
+      return argv[i + 1];
     }
   }
diff --git a/sample/launch/image_object_server.launch.py b/sample/launch/image_object_server.launch.py
index 2d3bbbba..cdf978cd 100644
--- a/sample/launch/image_object_server.launch.py
+++ b/sample/launch/image_object_server.launch.py
@@ -22,12 +22,12 @@

 def generate_launch_description():
-    default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param',
+    default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
                                 'image_object_server.yaml')
     return LaunchDescription([
         # Openvino detection
         launch_ros.actions.Node(
-            package='dynamic_vino_sample', node_executable='image_object_server',
+            package='openvino_node', node_executable='image_object_server',
             arguments=['-config', default_yaml],
             output='screen'),
     ])
diff --git a/sample/launch/image_people_server.launch.py b/sample/launch/image_people_server.launch.py
index c0a4ee57..0e2873d1 100644
--- a/sample/launch/image_people_server.launch.py
+++ b/sample/launch/image_people_server.launch.py
@@ -22,12 +22,12 @@

 def generate_launch_description():
-    default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param',
+    default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
                                 'image_people_server.yaml')
     return LaunchDescription([
         # Openvino detection
         launch_ros.actions.Node(
-            package='dynamic_vino_sample', node_executable='image_people_server',
+            package='openvino_node', node_executable='image_people_server',
             arguments=['-config', default_yaml],
             output='screen'),
     ])
diff --git a/sample/launch/multi_pipeline_service.launch.py b/sample/launch/multi_pipeline_service.launch.py
index 2bcdd2f6..aacc6973 100644
--- a/sample/launch/multi_pipeline_service.launch.py
+++ b/sample/launch/multi_pipeline_service.launch.py
@@ -22,14 +22,14 @@

 def generate_launch_description():
-    default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param',
+    default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
                                 'multi_pipleine_service.yaml')
-    default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch',
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
                                 'rviz/default2.rviz')
     return LaunchDescription([
         # Openvino detection
         launch_ros.actions.Node(
-            package='dynamic_vino_sample', node_executable='pipeline_with_params',
+            package='openvino_node', node_executable='pipeline_with_params',
             arguments=['-config', default_yaml],
             remappings=[
                 ('/openvino_toolkit/object1/detected_objects',
diff --git
a/sample/launch/pipeline_composite_object_topic.launch.py b/sample/launch/pipeline_composite_object_topic.launch.py index 5184448f..767a28ed 100644 --- a/sample/launch/pipeline_composite_object_topic.launch.py +++ b/sample/launch/pipeline_composite_object_topic.launch.py @@ -5,7 +5,7 @@ import os def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 'pipeline_composite_object_topic.yaml') container = ComposableNodeContainer( node_name='vision_pipeline', @@ -20,7 +20,7 @@ def generate_launch_description(): parameters=[get_package_share_directory('realsense_examples')+'/config/d435i.yaml'], extra_arguments=[{'use_intra_process_comms':'true'}]), ComposableNode( - package='dynamic_vino_sample', + package='openvino_node', node_plugin='ComposablePipeline', node_name='composable_pipeline', parameters=[{"config":default_yaml}], diff --git a/sample/launch/pipeline_face_reidentification.launch.py b/sample/launch/pipeline_face_reidentification.launch.py index 758cc881..5a2ceb51 100644 --- a/sample/launch/pipeline_face_reidentification.launch.py +++ b/sample/launch/pipeline_face_reidentification.launch.py @@ -24,16 +24,16 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_face_reidentification.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_face_reidentification.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_face_reidentification.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], diff --git a/sample/launch/pipeline_image.launch.py b/sample/launch/pipeline_image.launch.py index 8b272141..0ac9f013 100644 --- a/sample/launch/pipeline_image.launch.py +++ b/sample/launch/pipeline_image.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_image.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_image.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_image.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ 
('/openvino_toolkit/people/faces', diff --git a/sample/launch/pipeline_image_ci_test.py b/sample/launch/pipeline_image_ci_test.py new file mode 100644 index 00000000..644ccb76 --- /dev/null +++ b/sample/launch/pipeline_image_ci_test.py @@ -0,0 +1,56 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_image.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_image_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/people/faces', + '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/people/emotions', + '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/people/headposes', + '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/people/age_genders', + '/ros2_openvino_toolkit/people/age_genders_Recognition'), + ('/openvino_toolkit/people/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_object.launch.py b/sample/launch/pipeline_object.launch.py index fd9aaafb..457faae1 100644 --- a/sample/launch/pipeline_object.launch.py +++ b/sample/launch/pipeline_object.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', 
default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', @@ -44,8 +43,4 @@ def generate_launch_description(): '/ros2_openvino_toolkit/image_rviz')], output='screen'), - # Rviz - #launch_ros.actions.Node( - # package='rviz2', node_executable='rviz2', output='screen', - # arguments=['--display-config', default_rviz]), ]) diff --git a/sample/launch/pipeline_object_topic.launch.py b/sample/launch/pipeline_object_topic.launch.py index cac7cc28..07de7471 100644 --- a/sample/launch/pipeline_object_topic.launch.py +++ b/sample/launch/pipeline_object_topic.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_topic.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object_topic.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_topic.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_object_yolo.launch.py b/sample/launch/pipeline_object_yolo.launch.py index a4bbd01d..d5df63ac 100644 --- a/sample/launch/pipeline_object_yolo.launch.py +++ b/sample/launch/pipeline_object_yolo.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_yolo.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object_yolo.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolo.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_object_yolo_topic.launch.py b/sample/launch/pipeline_object_yolo_topic.launch.py index 7f6c0d22..451f4b95 100644 --- a/sample/launch/pipeline_object_yolo_topic.launch.py +++ 
b/sample/launch/pipeline_object_yolo_topic.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_yolo_topic.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object_yolo_topic.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolo_topic.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/pipeline_object_yolov5_ci_test.py b/sample/launch/pipeline_object_yolov5_ci_test.py new file mode 100644 index 00000000..cceaa362 --- /dev/null +++ b/sample/launch/pipeline_object_yolov5_ci_test.py @@ -0,0 +1,51 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_object_yolo.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolov5_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/images', + '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_object_yolov8_ci_test.py b/sample/launch/pipeline_object_yolov8_ci_test.py new file mode 100644 index 00000000..4d79a599 --- /dev/null +++ b/sample/launch/pipeline_object_yolov8_ci_test.py @@ -0,0 +1,51 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_object_yolo.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolov8_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/images', + '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_people.launch.py b/sample/launch/pipeline_people.launch.py index 3c10c216..88386dcd 100644 --- a/sample/launch/pipeline_people.launch.py +++ b/sample/launch/pipeline_people.launch.py @@ -25,19 +25,18 @@ def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_people.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_people.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/sample/launch/pipeline_people_ci_test.py b/sample/launch/pipeline_people_ci_test.py new file mode 100644 index 00000000..e37d4e45 --- /dev/null +++ b/sample/launch/pipeline_people_ci_test.py @@ -0,0 +1,58 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_people.yaml') + + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/people/detected_objects', + '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/people/emotions', + '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/people/headposes', + '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/people/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/people/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_people_ip.launch.py b/sample/launch/pipeline_people_ip.launch.py index 2cd41a3b..eb69a212 100644 --- a/sample/launch/pipeline_people_ip.launch.py +++ b/sample/launch/pipeline_people_ip.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_people_ip.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_people_ip.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people_ip.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/sample/launch/pipeline_person_attributes.launch.py b/sample/launch/pipeline_person_attributes.launch.py index ce6d6d50..10645ca2 100644 --- a/sample/launch/pipeline_person_attributes.launch.py +++ b/sample/launch/pipeline_person_attributes.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_person_attributes.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = 
os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_person_attributes.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_person_attributes.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_person_attributes_ci_test.py b/sample/launch/pipeline_person_attributes_ci_test.py new file mode 100644 index 00000000..75db6d5c --- /dev/null +++ b/sample/launch/pipeline_person_attributes_ci_test.py @@ -0,0 +1,51 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_person_attributes.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_person_attributes_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/person_attributes','/ros2_openvino_toolkit/person_attributes'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_reidentification.launch.py b/sample/launch/pipeline_reidentification.launch.py index 630aa61c..defca5b3 100644 --- a/sample/launch/pipeline_reidentification.launch.py +++ b/sample/launch/pipeline_reidentification.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_reidentification.yaml') - default_rviz = 
os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_reidentification.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_reidentification.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_reidentification_ci_test.py b/sample/launch/pipeline_reidentification_ci_test.py new file mode 100644 index 00000000..9461bcf1 --- /dev/null +++ b/sample/launch/pipeline_reidentification_ci_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_reidentification.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_reidentification_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/reidentified_persons', + '/ros2_openvino_toolkit/reidentified_persons'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_segmentation.launch.py b/sample/launch/pipeline_segmentation.launch.py index 9b511f6a..d5303ae4 100644 --- a/sample/launch/pipeline_segmentation.launch.py +++ b/sample/launch/pipeline_segmentation.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 
#'pipeline_segmentation.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_segmentation.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/pipeline_segmentation_ci_test.py b/sample/launch/pipeline_segmentation_ci_test.py new file mode 100644 index 00000000..7be59e6d --- /dev/null +++ b/sample/launch/pipeline_segmentation_ci_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_ci.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. 
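+        #
+        # No camera node is started here; for a live run, one illustrative way
+        # to feed the remapped image topic is the image_publisher package (an
+        # assumption about the surrounding setup, not required by this file):
+        #   ros2 run image_publisher image_publisher_node /path/to/test.jpg \
+        #       --ros-args -r image_raw:=/camera/color/image_raw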
+
+        # Openvino detection
+        launch_ros.actions.Node(
+            package='openvino_node',
+            executable='pipeline_with_params',
+            arguments=['-config', LaunchConfiguration('yaml_path')],
+            remappings=[
+                ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
+                ('/openvino_toolkit/segmentation/segmented_obejcts',
+                    '/ros2_openvino_toolkit/segmented_obejcts'),
+                ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')],
+            output='screen'),
+
+        # Rviz
+        #launch_ros.actions.Node(
+        #package='rviz2',
+        #executable='rviz2', output='screen',
+        #arguments=['--display-config', default_rviz]),
+    ])
diff --git a/sample/launch/pipeline_segmentation_image.launch.py b/sample/launch/pipeline_segmentation_image.launch.py
index bf460736..584ddfc3 100644
--- a/sample/launch/pipeline_segmentation_image.launch.py
+++ b/sample/launch/pipeline_segmentation_image.launch.py
@@ -24,25 +24,21 @@ import launch
 
 def generate_launch_description():
-    #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param',
+    #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
     #'pipeline_segmentation_image.yaml')
-    default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch',
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
         'rviz/default.rviz')
     return LaunchDescription([
         launch.actions.DeclareLaunchArgument(name='yaml_path', default_value =
-            os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_segmentation_image.yaml')),
+            os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_image.yaml')),
         # Realsense
         # NOTE: Split realsense_node launching from OpenVINO package, which
         # will be launched by RDK launching file or manually.
-        #launch_ros.actions.Node(
-        #    package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
-        #    output='screen'),
 
         # Openvino detection
         launch_ros.actions.Node(
-            package='dynamic_vino_sample',
+            package='openvino_node',
             executable='pipeline_with_params',
-            #arguments=['-config', default_yaml],
             arguments=['-config', LaunchConfiguration('yaml_path')],
             remappings=[
                 ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
diff --git a/sample/launch/pipeline_segmentation_image_ci_test.py b/sample/launch/pipeline_segmentation_image_ci_test.py
new file mode 100644
index 00000000..ef831657
--- /dev/null
+++ b/sample/launch/pipeline_segmentation_image_ci_test.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Launch image segmentation CI test."""
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+import launch_ros.actions
+
+from launch.substitutions import LaunchConfiguration, PythonExpression
+import launch
+
+def generate_launch_description():
+    #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
+    #'pipeline_segmentation_image.yaml')
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
+        'rviz/default.rviz')
+    return LaunchDescription([
+        launch.actions.DeclareLaunchArgument(name='yaml_path', default_value =
+            os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_image_ci.yaml')),
+        # Realsense
+        # NOTE: Split realsense_node launching from OpenVINO package, which
+        # will be launched by RDK launching file or manually.
+
+        # Openvino detection
+        launch_ros.actions.Node(
+            package='openvino_node',
+            executable='pipeline_with_params',
+            arguments=['-config', LaunchConfiguration('yaml_path')],
+            remappings=[
+                ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
+                ('/openvino_toolkit/segmentation/segmented_obejcts',
+                    '/ros2_openvino_toolkit/segmented_obejcts'),
+                ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')],
+            output='screen'),
+
+        # Rviz
+        #launch_ros.actions.Node(
+        #package='rviz2',
+        #executable='rviz2', output='screen',
+        #arguments=['--display-config', default_rviz]),
+    ])
diff --git a/sample/launch/pipeline_segmentation_instance.launch.py b/sample/launch/pipeline_segmentation_instance.launch.py
new file mode 100644
index 00000000..ec9da321
--- /dev/null
+++ b/sample/launch/pipeline_segmentation_instance.launch.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Launch instance segmentation and rviz."""
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+import launch_ros.actions
+
+from launch.substitutions import LaunchConfiguration, PythonExpression
+import launch
+
+def generate_launch_description():
+    #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
+    #'pipeline_segmentation.yaml')
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
+        'rviz/default.rviz')
+    return LaunchDescription([
+        launch.actions.DeclareLaunchArgument(name='yaml_path', default_value =
+            os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_instance.yaml')),
+        # Realsense
+        # NOTE: Split realsense_node launching from OpenVINO package, which
+        # will be launched by RDK launching file or manually.
+
+        # Openvino detection
+        launch_ros.actions.Node(
+            package='openvino_node',
+            executable='pipeline_with_params',
+            arguments=['-config', LaunchConfiguration('yaml_path')],
+            remappings=[
+                ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
+                ('/openvino_toolkit/segmentation/segmented_obejcts',
+                    '/ros2_openvino_toolkit/segmented_obejcts'),
+                ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')],
+            output='screen'),
+
+        # Rviz
+        #launch_ros.actions.Node(
+        #    package='rviz2',
+        #    executable='rviz2', output='screen',
+        #    arguments=['--display-config', default_rviz]),
+    ])
diff --git a/sample/launch/pipeline_segmentation_instance_ci_test.py b/sample/launch/pipeline_segmentation_instance_ci_test.py
new file mode 100644
index 00000000..08f13803
--- /dev/null
+++ b/sample/launch/pipeline_segmentation_instance_ci_test.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Launch instance segmentation (YOLOv8-seg) CI test."""
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+import launch_ros.actions
+
+from launch.substitutions import LaunchConfiguration, PythonExpression
+import launch
+
+def generate_launch_description():
+    #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
+    #'pipeline_segmentation.yaml')
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
+        'rviz/default.rviz')
+    return LaunchDescription([
+        launch.actions.DeclareLaunchArgument(name='yaml_path', default_value =
+            os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_instance_yolov8_seg_ci.yaml')),
+        # Realsense
+        # NOTE: Split realsense_node launching from OpenVINO package, which
+        # will be launched by RDK launching file or manually.
+
+        # Openvino detection
+        launch_ros.actions.Node(
+            package='openvino_node',
+            executable='pipeline_with_params',
+            arguments=['-config', LaunchConfiguration('yaml_path')],
+            remappings=[
+                ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
+                ('/openvino_toolkit/segmentation/segmented_obejcts',
+                    '/ros2_openvino_toolkit/segmented_obejcts'),
+                ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')],
+            output='screen'),
+
+        # Rviz
+        #launch_ros.actions.Node(
+        #    package='rviz2',
+        #    executable='rviz2', output='screen',
+        #    arguments=['--display-config', default_rviz]),
+    ])
diff --git a/sample/launch/pipeline_segmentation_maskrcnn.launch.py b/sample/launch/pipeline_segmentation_maskrcnn.launch.py
new file mode 100644
index 00000000..e1901a3e
--- /dev/null
+++ b/sample/launch/pipeline_segmentation_maskrcnn.launch.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Launch Mask R-CNN segmentation and rviz."""
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+import launch_ros.actions
+
+from launch.substitutions import LaunchConfiguration, PythonExpression
+import launch
+
+def generate_launch_description():
+    #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param',
+    #'pipeline_segmentation.yaml')
+    default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch',
+        'rviz/default.rviz')
+    return LaunchDescription([
+        launch.actions.DeclareLaunchArgument(name='yaml_path', default_value =
+            os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_maskrcnn.yaml')),
+        # Realsense
+        # NOTE: Split realsense_node launching from OpenVINO package, which
+        # will be launched by RDK launching file or manually.
+
+        # Openvino detection
+        launch_ros.actions.Node(
+            package='openvino_node',
+            executable='pipeline_with_params',
+            arguments=['-config', LaunchConfiguration('yaml_path')],
+            remappings=[
+                ('/openvino_toolkit/image_raw', '/camera/color/image_raw'),
+                ('/openvino_toolkit/segmentation/segmented_obejcts',
+                    '/ros2_openvino_toolkit/segmented_obejcts'),
+                ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')],
+            output='screen'),
+
+        # Rviz
+        #launch_ros.actions.Node(
+        #    package='rviz2',
+        #    executable='rviz2', output='screen',
+        #    arguments=['--display-config', default_rviz]),
+    ])
diff --git a/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py b/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py
new file mode 100644
index 00000000..98c71950
--- /dev/null
+++ b/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_maskrcnn_ci.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. + + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), + ('/openvino_toolkit/segmentation/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_vehicle_detection.launch.py b/sample/launch/pipeline_vehicle_detection.launch.py index 56cb722d..a147d95c 100644 --- a/sample/launch/pipeline_vehicle_detection.launch.py +++ b/sample/launch/pipeline_vehicle_detection.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_vehicle_detection.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_vehicle_detection.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_vehicle_detection.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_license_plates', diff --git a/sample/launch/pipeline_vehicle_detection_ci_test.py b/sample/launch/pipeline_vehicle_detection_ci_test.py new file mode 100644 index 00000000..f4d72f15 --- /dev/null +++ b/sample/launch/pipeline_vehicle_detection_ci_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_vehicle_detection.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_vehicle_detection_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_license_plates', + '/ros2_openvino_toolkit/detected_license_plates'), + ('/openvino_toolkit/object/detected_vehicles_attribs', + '/ros2_openvino_toolkit/detected_vehicles_attribs'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_video.launch.py b/sample/launch/pipeline_video.launch.py index a232fee9..63f76cec 100644 --- a/sample/launch/pipeline_video.launch.py +++ b/sample/launch/pipeline_video.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_video.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_video.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_video.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/segmentation/segmented_obejcts', diff --git a/sample/launch/ros2_openvino_oa.launch.py b/sample/launch/ros2_openvino_oa.launch.py index 687c8fc7..b759e165 100644 --- a/sample/launch/ros2_openvino_oa.launch.py +++ b/sample/launch/ros2_openvino_oa.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = 
os.path.join(get_package_share_directory('openvino_node'), 'param', 'pipeline_object_topic.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/rviz2.launch.py b/sample/launch/rviz2.launch.py new file mode 100644 index 00000000..a0fb3a65 --- /dev/null +++ b/sample/launch/rviz2.launch.py @@ -0,0 +1,35 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch rviz2 for ROS2-OpenVINO.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + # Rviz + launch_ros.actions.Node( + package='rviz2', + executable='rviz2', output='screen', + arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/package.xml b/sample/package.xml index e86a34fa..b82fcf92 100644 --- a/sample/package.xml +++ b/sample/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_sample + openvino_node 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -33,8 +33,8 @@ limitations under the License. gflags yaml_cpp_vendor ament_index_cpp - dynamic_vino_lib - vino_param_lib + openvino_wrapper_lib + openvino_param_lib cv_bridge object_msgs realsense2 @@ -49,8 +49,8 @@ limitations under the License. 
ament_index_cpp class_loader cv_bridge - dynamic_vino_lib - vino_param_lib + openvino_wrapper_lib + openvino_param_lib object_msgs realsense2 diff --git a/sample/param/image_object_server.yaml b/sample/param/image_object_server.yaml index 030cb841..19c7bb48 100644 --- a/sample/param/image_object_server.yaml +++ b/sample/param/image_object_server.yaml @@ -1,9 +1,10 @@ Pipelines: - name: object inputs: [Image] + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 @@ -15,6 +16,6 @@ Pipelines: right: [ObjectDetection] - left: ObjectDetection right: [RosService] - input_path: "/home/intel/Pictures/car.png" + Common: diff --git a/sample/param/image_people_server.yaml b/sample/param/image_people_server.yaml index 578ec311..21e2a2de 100644 --- a/sample/param/image_people_server.yaml +++ b/sample/param/image_people_server.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml engine: CPU - label: /to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService, RViz] - left: HeadPoseEstimation right: [RosService, RViz] - input_path: "~/Pictures/face.jpeg" Common: diff --git a/sample/param/multi_pipleine_service.yaml b/sample/param/multi_pipleine_service.yaml index 2a55d57c..0220ae11 100644 --- a/sample/param/multi_pipleine_service.yaml +++ b/sample/param/multi_pipleine_service.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: 
/opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 @@ -24,7 +24,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_composite_object_topic.yaml b/sample/param/pipeline_composite_object_topic.yaml index 58e6bbf7..61c4d6f2 100644 --- a/sample/param/pipeline_composite_object_topic.yaml +++ b/sample/param/pipeline_composite_object_topic.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_face_reidentification.yaml b/sample/param/pipeline_face_reidentification.yaml index 08c5bef2..c2cd0f5a 100644 --- a/sample/param/pipeline_face_reidentification.yaml +++ b/sample/param/pipeline_face_reidentification.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_image.yaml b/sample/param/pipeline_image.yaml index 3a0d0923..f41c3dc2 100644 --- a/sample/param/pipeline_image.yaml +++ b/sample/param/pipeline_image.yaml @@ -1,27 +1,29 @@ -Pipelines: +Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/sample_faces.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: 
/opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU + label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU + label: to/be/set/xxx.labels batch: 16 outputs: [ImageWindow, RosTopic, RViz] connects: diff --git a/sample/param/pipeline_image_ci.yaml b/sample/param/pipeline_image_ci.yaml new file mode 100644 index 00000000..37da03ba --- /dev/null +++ b/sample/param/pipeline_image_ci.yaml @@ -0,0 +1,41 @@ +Pipelines: +- name: people + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + connects: + - left: Image + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic] + - left: AgeGenderRecognition + right: [RosTopic] + - left: EmotionRecognition + right: [RosTopic] + - left: HeadPoseEstimation + right: [RosTopic] + +Common: diff --git a/sample/param/pipeline_image_video.yaml 
b/sample/param/pipeline_image_video.yaml index 887cfe25..b383f30f 100644 --- a/sample/param/pipeline_image_video.yaml +++ b/sample/param/pipeline_image_video.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /home/houk/Desktop/video + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/pipeline_object.yaml b/sample/param/pipeline_object.yaml index 7c0f97d7..62e2f9ca 100644 --- a/sample/param/pipeline_object.yaml +++ b/sample/param/pipeline_object.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_object_topic.yaml b/sample/param/pipeline_object_topic.yaml index 39c9cd34..2d1a2c7d 100644 --- a/sample/param/pipeline_object_topic.yaml +++ b/sample/param/pipeline_object_topic.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] #[RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_object_yolo.yaml b/sample/param/pipeline_object_yolo.yaml index 1c629dee..a80fc4c3 100644 --- a/sample/param/pipeline_object_yolo.yaml +++ b/sample/param/pipeline_object_yolo.yaml @@ -1,11 +1,13 @@ Pipelines: - name: object - inputs: [RealSenseCamera] + inputs: [StandardCamera] + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/public/yolo-v2-tf/FP16/yolo-v2-tf.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + #model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model: /home/lewis/develop/openvino/models/models/yolo/yolov7/yolov7_int8.xml + #model: /home/lewis/develop/openvino/models/models/yolo/yolov8/yolov8n_openvino_int8_model/yolov8n.xml + model_type: yolov5 #yolov8 engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 @@ -13,7 +15,7 @@ Pipelines: enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame outputs: [ImageWindow, RosTopic, RViz] connects: - - left: RealSenseCamera + - left: StandardCamera right: [ObjectDetection] - left: ObjectDetection right: [ImageWindow] diff --git a/sample/param/pipeline_object_yolo_topic.yaml b/sample/param/pipeline_object_yolo_topic.yaml index 7d268287..fdff4264 100644 --- a/sample/param/pipeline_object_yolo_topic.yaml +++ b/sample/param/pipeline_object_yolo_topic.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_object_yolov5_ci.yaml b/sample/param/pipeline_object_yolov5_ci.yaml new file mode 100644 index 00000000..7804ecb0 --- /dev/null +++ b/sample/param/pipeline_object_yolov5_ci.yaml @@ -0,0 +1,21 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 + engine: CPU #MYRIAD + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_object_yolov8_ci.yaml b/sample/param/pipeline_object_yolov8_ci.yaml new file mode 100644 index 00000000..37b6ad91 --- /dev/null +++ b/sample/param/pipeline_object_yolov8_ci.yaml @@ -0,0 +1,21 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/convert/public/FP32/yolov8n/yolov8n.xml + model_type: yolov8 + engine: CPU #MYRIAD + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_people.yaml b/sample/param/pipeline_people.yaml index a68b4cc3..6d9805e0 100644 --- a/sample/param/pipeline_people.yaml +++ b/sample/param/pipeline_people.yaml @@ -1,26 +1,26 @@ Pipelines: - name: 
people inputs: [StandardCamera] - infers: + infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/pipeline_people_ci.yaml b/sample/param/pipeline_people_ci.yaml new file mode 100644 index 00000000..ce0a1e90 --- /dev/null +++ b/sample/param/pipeline_people_ci.yaml @@ -0,0 +1,41 @@ +Pipelines: +- name: people + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + 
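+ # CI variant: runs headless inside the test container, so every stage publishes to RosTopic only (no ImageWindow/RViz).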
connects:
+ - left: Image
+ right: [FaceDetection]
+ - left: FaceDetection
+ right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic]
+ - left: AgeGenderRecognition
+ right: [RosTopic]
+ - left: EmotionRecognition
+ right: [RosTopic]
+ - left: HeadPoseEstimation
+ right: [RosTopic]
+
+Common:
diff --git a/sample/param/pipeline_people_ip.yaml b/sample/param/pipeline_people_ip.yaml
index ba01c412..b37903c1 100644
--- a/sample/param/pipeline_people_ip.yaml
+++ b/sample/param/pipeline_people_ip.yaml
@@ -4,24 +4,24 @@ Pipelines:
input_path: "rtsp://"
infers:
- name: FaceDetection
- model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml
+ model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml
engine: CPU
- label: to/be/set/xxx.labels
+ label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.labels
batch: 1
confidence_threshold: 0.5
enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
- name: AgeGenderRecognition
- model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml
+ model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml
engine: CPU
label: to/be/set/xxx.labels
batch: 16
- name: EmotionRecognition
- model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
+ model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
engine: CPU
- label: to/be/set/xxx.labels
+ label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels
batch: 16
- name: HeadPoseEstimation
- model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
+ model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
engine: CPU
label: to/be/set/xxx.labels
batch: 16
diff --git a/sample/param/pipeline_person_attributes.yaml b/sample/param/pipeline_person_attributes.yaml
index 527ec655..8721c40f 100644
--- a/sample/param/pipeline_person_attributes.yaml
+++ b/sample/param/pipeline_person_attributes.yaml
@@ -3,14 +3,14 @@ Pipelines:
inputs: [StandardCamera]
infers:
- name: ObjectDetection
- model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml
+ model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml
engine: CPU
label: to/be/set/xxx.labels
batch: 1
confidence_threshold: 0.5
enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame
- name: PersonAttribsDetection
- model: /opt/openvino_toolkit/models/person-attributes/output/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml
+ model: /opt/openvino_toolkit/models/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml
engine: CPU
label: to/be/set/xxx.labels
batch: 1
diff --git
a/sample/param/pipeline_person_attributes_ci.yaml b/sample/param/pipeline_person_attributes_ci.yaml new file mode 100644 index 00000000..786c2461 --- /dev/null +++ b/sample/param/pipeline_person_attributes_ci.yaml @@ -0,0 +1,28 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: PersonAttribsDetection + model: /opt/openvino_toolkit/models/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [PersonAttribsDetection, RosTopic] + - left: PersonAttribsDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_reidentification.yaml b/sample/param/pipeline_reidentification.yaml index 5a0d472a..2598031b 100644 --- a/sample/param/pipeline_reidentification.yaml +++ b/sample/param/pipeline_reidentification.yaml @@ -3,14 +3,14 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_reidentification_ci.yaml b/sample/param/pipeline_reidentification_ci.yaml new file mode 100644 index 00000000..72b8f22a --- /dev/null +++ b/sample/param/pipeline_reidentification_ci.yaml @@ -0,0 +1,28 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: PersonReidentification + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.7 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [PersonReidentification] + - left: 
PersonReidentification + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation.yaml b/sample/param/pipeline_segmentation.yaml index bd5a1b80..f0eccb13 100644 --- a/sample/param/pipeline_segmentation.yaml +++ b/sample/param/pipeline_segmentation.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/public/deeplabv3/FP16/deeplabv3.xml + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml engine: CPU #"HETERO:CPU,GPU,MYRIAD" label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_segmentation_ci.yaml b/sample/param/pipeline_segmentation_ci.yaml new file mode 100644 index 00000000..b8f075c2 --- /dev/null +++ b/sample/param/pipeline_segmentation_ci.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png + infers: + - name: ObjectSegmentation + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml + engine: CPU #"HETERO:CPU,GPU,MYRIAD" + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectSegmentation] + - left: ObjectSegmentation + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation_image.yaml b/sample/param/pipeline_segmentation_image.yaml index 616d290d..33481f8b 100644 --- a/sample/param/pipeline_segmentation_image.yaml +++ b/sample/param/pipeline_segmentation_image.yaml @@ -1,10 +1,10 @@ Pipelines: - name: segmentation inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/expressway.jpg + input_path: to/be/set/image_path infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/semantic-segmentation/output/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_segmentation_image_ci.yaml b/sample/param/pipeline_segmentation_image_ci.yaml new file mode 100644 index 00000000..c80832bc --- /dev/null +++ b/sample/param/pipeline_segmentation_image_ci.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png + infers: + - name: ObjectSegmentation + model: /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectSegmentation] + - left: ObjectSegmentation + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation_instance.yaml b/sample/param/pipeline_segmentation_instance.yaml new file mode 100644 index 00000000..f29352b3 --- /dev/null +++ b/sample/param/pipeline_segmentation_instance.yaml @@ -0,0 +1,30 @@ +Pipelines: +- name: segmentation + inputs: [StandardCamera] + infers: + - name: ObjectSegmentationInstance + # for Yolov8 Seg models ----------------- + model: /opt/openvino_toolkit/models/convert/public/yolov8n-seg/FP32/yolov8n-seg.xml + model_type: yolo + label: /opt/openvino_toolkit/labels/object_detection/coco.names + # for maskrcnn inception resnet ----------------- + #model: 
/opt/openvino_toolkit/models/convert/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP32/mask_rcnn_inception_resnet_v2_atrous_coco.xml
+ #model_type: maskrcnn
+ #label: /opt/openvino_toolkit/labels/object_segmentation/frozen_inference_graph.labels #for maskrcnn
+ #----------------------
+ engine: CPU #"HETERO:CPU,GPU," #"HETERO:CPU,GPU,MYRIAD"
+ batch: 1
+ confidence_threshold: 0.5
+ nms_threshold: 0.5
+ outputs: [ImageWindow, RosTopic, RViz]
+ connects:
+ - left: StandardCamera
+ right: [ObjectSegmentationInstance]
+ - left: ObjectSegmentationInstance
+ right: [ImageWindow]
+ - left: ObjectSegmentationInstance
+ right: [RosTopic]
+ - left: ObjectSegmentationInstance
+ right: [RViz]
+
+Common:
diff --git a/sample/param/pipeline_segmentation_instance_yolov8_seg_ci.yaml b/sample/param/pipeline_segmentation_instance_yolov8_seg_ci.yaml
new file mode 100644
index 00000000..ffc3f276
--- /dev/null
+++ b/sample/param/pipeline_segmentation_instance_yolov8_seg_ci.yaml
@@ -0,0 +1,22 @@
+Pipelines:
+- name: segmentation
+ inputs: [Image]
+ input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg
+
+ infers:
+ - name: ObjectSegmentationInstance
+ # for Yolov8 Seg models -----------------
+ model: /opt/openvino_toolkit/models/convert/public/FP32/yolov8n-seg/yolov8n-seg.xml
+ model_type: yolov8
+ label: /opt/openvino_toolkit/labels/object_detection/coco.names
+ engine: CPU #"HETERO:CPU,GPU," #"HETERO:CPU,GPU,MYRIAD"
+ batch: 1
+ confidence_threshold: 0.5
+ outputs: [RosTopic]
+ connects:
+ - left: Image
+ right: [ObjectSegmentationInstance]
+ - left: ObjectSegmentationInstance
+ right: [RosTopic]
+
+Common:
diff --git a/sample/param/pipeline_segmentation_maskrcnn.yaml b/sample/param/pipeline_segmentation_maskrcnn.yaml
new file mode 100644
index 00000000..fa47f088
--- /dev/null
+++ b/sample/param/pipeline_segmentation_maskrcnn.yaml
@@ -0,0 +1,22 @@
+Pipelines:
+- name: segmentation
+ inputs: [StandardCamera]
+ infers:
+ - name: ObjectSegmentationMaskrcnn
+ model: /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/mask_rcnn_inception_resnet_v2_atrous_coco.xml
+ engine: CPU #"HETERO:CPU,GPU,MYRIAD"
+ label: to/be/set/xxx.labels
+ batch: 1
+ confidence_threshold: 0.5
+ outputs: [ImageWindow, RosTopic, RViz]
+ connects:
+ - left: StandardCamera
+ right: [ObjectSegmentationMaskrcnn]
+ - left: ObjectSegmentationMaskrcnn
+ right: [ImageWindow]
+ - left: ObjectSegmentationMaskrcnn
+ right: [RosTopic]
+ - left: ObjectSegmentationMaskrcnn
+ right: [RViz]
+
+OpenvinoCommon:
diff --git a/sample/param/pipeline_segmentation_maskrcnn_ci.yaml b/sample/param/pipeline_segmentation_maskrcnn_ci.yaml
new file mode 100644
index 00000000..855b6833
--- /dev/null
+++ b/sample/param/pipeline_segmentation_maskrcnn_ci.yaml
@@ -0,0 +1,19 @@
+Pipelines:
+- name: segmentation
+ inputs: [Image]
+ input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png
+ infers:
+ - name: ObjectSegmentationMaskrcnn
+ model: /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/mask_rcnn_inception_resnet_v2_atrous_coco.xml
+ engine: CPU #"HETERO:CPU,GPU,MYRIAD"
+ label: to/be/set/xxx.labels
+ batch: 1
+ confidence_threshold: 0.5
+ outputs: [RosTopic]
+ connects:
+ - left: Image
+ right: [ObjectSegmentationMaskrcnn]
+ - left: ObjectSegmentationMaskrcnn
+ right: [RosTopic]
+
+OpenvinoCommon:
diff --git a/sample/param/pipeline_vehicle_detection.yaml
b/sample/param/pipeline_vehicle_detection.yaml index a91af9ea..3eff9e59 100644 --- a/sample/param/pipeline_vehicle_detection.yaml +++ b/sample/param/pipeline_vehicle_detection.yaml @@ -3,18 +3,18 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recognition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_vehicle_detection_ci.yaml b/sample/param/pipeline_vehicle_detection_ci.yaml new file mode 100644 index 00000000..760ff276 --- /dev/null +++ b/sample/param/pipeline_vehicle_detection_ci.yaml @@ -0,0 +1,35 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/jpg/car.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels + batch: 1 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: VehicleAttribsDetection + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + - name: LicensePlateDetection + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [{VehicleAttribsDetection: label == vehicle && confidence >= 0.8}, {LicensePlateDetection: label == license && confidence >= 0.8}] + - left: ObjectDetection + right: [RosTopic] + - left: VehicleAttribsDetection + right: [RosTopic] + - left: LicensePlateDetection + right: 
[RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_video.yaml b/sample/param/pipeline_video.yaml index 0872be8e..0493ca76 100644 --- a/sample/param/pipeline_video.yaml +++ b/sample/param/pipeline_video.yaml @@ -1,10 +1,10 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /home/ubuntu20/jiawei/ros-ov/ros2_galactic_openvino_ws/src/ros2_openvino_toolkit/data/car_cut.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/public/deeplabv3/FP16/deeplabv3.xml + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/image_object_service_test.yaml b/sample/param/testParam/param/image_object_service_test.yaml index 9a1ffd0b..baea4479 100644 --- a/sample/param/testParam/param/image_object_service_test.yaml +++ b/sample/param/testParam/param/image_object_service_test.yaml @@ -1,10 +1,10 @@ Pipelines: - name: object inputs: [Image] - input_path: "/opt/openvino_toolkit/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/car_vihecle.png" + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/image_people_service_test.yaml b/sample/param/testParam/param/image_people_service_test.yaml index ec579426..40f6513e 100644 --- a/sample/param/testParam/param/image_people_service_test.yaml +++ b/sample/param/testParam/param/image_people_service_test.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 16 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: 
/opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService] - left: HeadPoseEstimation right: [RosService] - input_path: "/home/intel/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/team.png" Common: diff --git a/sample/param/testParam/param/pipeline_anormal.yaml b/sample/param/testParam/param/pipeline_anormal.yaml index 8f5f5146..2e3a4214 100644 --- a/sample/param/testParam/param/pipeline_anormal.yaml +++ b/sample/param/testParam/param/pipeline_anormal.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: Objectdetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 @@ -21,7 +21,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_face_reid_video.yaml b/sample/param/testParam/param/pipeline_face_reid_video.yaml index f59b2a7d..82986615 100644 --- a/sample/param/testParam/param/pipeline_face_reid_video.yaml +++ b/sample/param/testParam/param/pipeline_face_reid_video.yaml @@ -1,22 +1,22 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/face_reid.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_face_reidentification_test.yaml b/sample/param/testParam/param/pipeline_face_reidentification_test.yaml index 6313811a..54ee42ee 100644 --- a/sample/param/testParam/param/pipeline_face_reidentification_test.yaml +++ 
b/sample/param/testParam/param/pipeline_face_reidentification_test.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_face_test.yaml b/sample/param/testParam/param/pipeline_face_test.yaml index f831fd19..68b395c9 100644 --- a/sample/param/testParam/param/pipeline_face_test.yaml +++ b/sample/param/testParam/param/pipeline_face_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/people_detection.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: 
/opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_image_test.yaml b/sample/param/testParam/param/pipeline_image_test.yaml index 50540acd..074cbd22 100644 --- a/sample/param/testParam/param/pipeline_image_test.yaml +++ b/sample/param/testParam/param/pipeline_image_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_object_test.yaml b/sample/param/testParam/param/pipeline_object_test.yaml index c45999ec..542d3142 100644 --- a/sample/param/testParam/param/pipeline_object_test.yaml +++ b/sample/param/testParam/param/pipeline_object_test.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_object_yolo_test.yaml b/sample/param/testParam/param/pipeline_object_yolo_test.yaml index dfdbe15d..35fc06c9 100644 --- a/sample/param/testParam/param/pipeline_object_yolo_test.yaml +++ b/sample/param/testParam/param/pipeline_object_yolo_test.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: GPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_reidentification_test.yaml b/sample/param/testParam/param/pipeline_reidentification_test.yaml index 8bb8228d..527742fe 100644 --- a/sample/param/testParam/param/pipeline_reidentification_test.yaml +++ b/sample/param/testParam/param/pipeline_reidentification_test.yaml @@ -1,17 +1,17 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/people_reid.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_segmentation_test.yaml b/sample/param/testParam/param/pipeline_segmentation_test.yaml index 2a7a0dfd..7ba13e01 100644 --- a/sample/param/testParam/param/pipeline_segmentation_test.yaml +++ b/sample/param/testParam/param/pipeline_segmentation_test.yaml @@ -1,7 +1,7 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/segmentation.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation model: /opt/openvino_toolkit/models/segmentation/output/FP16/frozen_inference_graph.xml diff --git a/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml b/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml index 5f2d7b50..ae4c173b 100644 --- a/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml +++ b/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml @@ -1,21 +1,21 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/vehicle_detection.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) 
constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 4d58ef72..bfb7e125 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,22 +13,23 @@ // limitations under the License. #include -#include +#include #include #include #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" -int main(int argc, char ** argv) +int main(int argc, char** argv) { rclcpp::init(argc, argv); auto node = rclcpp::Node::make_shared("service_example_for_object"); if (argc != 2) { - RCLCPP_INFO(node->get_logger(), "Usage: ros2 run dynamic_vino_sample image_object_client" - ""); + RCLCPP_INFO(node->get_logger(), + "Usage: ros2 run openvino_node image_object_client" + ""); return -1; } @@ -47,9 +48,7 @@ int main(int argc, char ** argv) } auto result = client->async_send_request(request); - if (rclcpp::spin_until_future_complete(node, result) == - rclcpp::FutureReturnCode::SUCCESS) - { + if (rclcpp::spin_until_future_complete(node, result) == rclcpp::FutureReturnCode::SUCCESS) { auto srv = result.get(); cv::Mat image = cv::imread(image_path); @@ -58,16 +57,13 @@ int main(int argc, char ** argv) for (unsigned int i = 0; i < srv->objects.objects_vector.size(); i++) { std::stringstream ss; - ss << srv->objects.objects_vector[i].object.object_name << ": " << - srv->objects.objects_vector[i].object.probability * 100 << "%"; - RCLCPP_INFO(node->get_logger(), "%d: object: %s", i, - srv->objects.objects_vector[i].object.object_name.c_str()); - RCLCPP_INFO(node->get_logger(), "prob: %f", - srv->objects.objects_vector[i].object.probability); - RCLCPP_INFO( - node->get_logger(), "location: (%d, %d, %d, %d)", - srv->objects.objects_vector[i].roi.x_offset, srv->objects.objects_vector[i].roi.y_offset, - srv->objects.objects_vector[i].roi.width, srv->objects.objects_vector[i].roi.height); + ss << srv->objects.objects_vector[i].object.object_name << ": " + << srv->objects.objects_vector[i].object.probability * 100 << "%"; + RCLCPP_INFO(node->get_logger(), "%d: object: %s", i, srv->objects.objects_vector[i].object.object_name.c_str()); + RCLCPP_INFO(node->get_logger(), "prob: %f", srv->objects.objects_vector[i].object.probability); + RCLCPP_INFO(node->get_logger(), "location: (%d, %d, %d, %d)", srv->objects.objects_vector[i].roi.x_offset, + srv->objects.objects_vector[i].roi.y_offset, srv->objects.objects_vector[i].roi.width, + 
srv->objects.objects_vector[i].roi.height); int xmin = srv->objects.objects_vector[i].roi.x_offset; int ymin = srv->objects.objects_vector[i].roi.y_offset; @@ -80,10 +76,8 @@ int main(int argc, char ** argv) cv::Point left_top = cv::Point(xmin, ymin); cv::Point right_bottom = cv::Point(xmax, ymax); cv::rectangle(image, left_top, right_bottom, cv::Scalar(0, 255, 0), 1, 8, 0); - cv::rectangle(image, cv::Point(xmin, ymin), cv::Point(xmax, ymin + 20), cv::Scalar(0, 255, 0), - -1); - cv::putText(image, ss.str(), cv::Point(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 1, - cv::Scalar(0, 0, 255), 1); + cv::rectangle(image, cv::Point(xmin, ymin), cv::Point(xmax, ymin + 20), cv::Scalar(0, 255, 0), -1); + cv::putText(image, ss.str(), cv::Point(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 0, 255), 1); } cv::imshow("image_detection", image); cv::waitKey(0); diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index 9b28edb5..b6dbb6e9 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,24 +13,24 @@ // limitations under the License. #include -#include +#include #include #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/services/frame_processing_server.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "inference_engine.hpp" -#if(defined(USE_OLD_E_PLUGIN_API)) +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino/openvino.hpp" +#if (defined(USE_OLD_E_PLUGIN_API)) #include #endif #include "utility.hpp" -int main(int argc, char ** argv) +int main(int argc, char** argv) { rclcpp::init(argc, argv); @@ -38,14 +38,14 @@ int main(int argc, char ** argv) try { std::string service_name = "frame_processing_server"; - auto node = std::make_shared>(service_name, config_path); + auto node = std::make_shared>(service_name, + config_path); rclcpp::spin(node); - } catch (std::exception & e) { + } catch (std::exception& e) { std::cout << e.what() << std::endl; } catch (...) { - std::cout << "[ERROR] [frame_processing_server]: " << - "exception caught" << std::endl; + std::cout << "[ERROR] [frame_processing_server]: " + << "exception caught" << std::endl; } return 0; diff --git a/sample/src/image_people_client.cpp b/sample/src/image_people_client.cpp index 21adda4f..39739d4c 100644 --- a/sample/src/image_people_client.cpp +++ b/sample/src/image_people_client.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,33 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. 
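For context on the header swaps in the client/server hunks around here (inference_engine.hpp replaced by openvino/openvino.hpp, InferenceEngine calls replaced by the ov:: namespace), the sketch below shows the OpenVINO 2.0 runtime calls these samples now build against. It is a minimal standalone sketch, not code from this patch; it assumes OpenVINO 2022.x, and the IR path is just one of the model paths used in the configs above.

// Sketch: OpenVINO 2.0 runtime API targeted by this migration.
#include <iostream>
#include "openvino/openvino.hpp"

int main()
{
  // Replaces InferenceEngine::GetInferenceEngineVersion().
  std::cout << "OpenVINO: " << ov::get_openvino_version() << std::endl;
  ov::Core core;  // replaces InferenceEngine::Core
  std::shared_ptr<ov::Model> model =
    core.read_model("/opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml");
  // "CPU"/"GPU"/"MYRIAD" correspond to the 'engine' field in the YAML pipelines.
  ov::CompiledModel compiled = core.compile_model(model, "CPU");
  ov::InferRequest request = compiled.create_infer_request();  // per-frame inference runs through this object
  (void)request;  // input/output tensor handling is omitted in this sketch
  return 0;
}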
-#include
-#include
+#include
+#include
#include
-#include
+#include
#include
#include
#include
#include
#include
-#include "dynamic_vino_lib/services/frame_processing_server.hpp"
+#include "openvino_wrapper_lib/services/frame_processing_server.hpp"
-int main(int argc, char ** argv)
+int main(int argc, char** argv)
{
rclcpp::init(argc, argv);
auto node = rclcpp::Node::make_shared("service_example_for_face");
if (argc != 2) {
- RCLCPP_INFO(node->get_logger(), "Usage: ros2 run dynamic_vino_sample image_object_client"
- "");
+ RCLCPP_INFO(node->get_logger(),
+ "Usage: ros2 run openvino_node image_people_client"
+ "");
return -1;
}
std::string image_path = argv[1];
- auto client = node->create_client("/openvino_toolkit/service");
- auto request = std::make_shared();
+ auto client = node->create_client("/openvino_toolkit/service");
+ auto request = std::make_shared();
request->image_path = image_path;
while (!client->wait_for_service(std::chrono::seconds(1))) {
@@ -51,34 +52,27 @@ int main(int argc, char ** argv)
auto result = client->async_send_request(request);
- if (rclcpp::spin_until_future_complete(node, result) ==
- rclcpp::FutureReturnCode::SUCCESS)
- {
+ if (rclcpp::spin_until_future_complete(node, result) == rclcpp::FutureReturnCode::SUCCESS) {
auto people = result.get();
if (people->persons.emotions.size() == 0 && people->persons.agegenders.size() == 0 &&
- people->persons.headposes.size() == 0)
- {
+ people->persons.headposes.size() == 0) {
RCLCPP_INFO(node->get_logger(), "Get response, but no any person found.");
return 0;
}
RCLCPP_INFO(node->get_logger(), "Found persons...");
for (unsigned int i = 0; i < people->persons.faces.size(); i++) {
- RCLCPP_INFO(node->get_logger(), "%d: object: %s", i,
- people->persons.faces[i].object.object_name.c_str());
- RCLCPP_INFO(node->get_logger(), "prob: %f",
- people->persons.faces[i].object.probability);
- RCLCPP_INFO(
- node->get_logger(), "location: (%d, %d, %d, %d)",
- people->persons.faces[i].roi.x_offset, people->persons.faces[i].roi.y_offset,
- people->persons.faces[i].roi.width, people->persons.faces[i].roi.height);
- RCLCPP_INFO(node->get_logger(), "Emotions: %s",
- people->persons.emotions[i].emotion.c_str());
- RCLCPP_INFO(node->get_logger(), "Age: %f, Gender: %s",
- people->persons.agegenders[i].age, people->persons.agegenders[i].gender.c_str());
+ RCLCPP_INFO(node->get_logger(), "%d: object: %s", i, people->persons.faces[i].object.object_name.c_str());
+ RCLCPP_INFO(node->get_logger(), "prob: %f", people->persons.faces[i].object.probability);
+ RCLCPP_INFO(node->get_logger(), "location: (%d, %d, %d, %d)", people->persons.faces[i].roi.x_offset,
+ people->persons.faces[i].roi.y_offset, people->persons.faces[i].roi.width,
+ people->persons.faces[i].roi.height);
+ RCLCPP_INFO(node->get_logger(), "Emotions: %s", people->persons.emotions[i].emotion.c_str());
+ RCLCPP_INFO(node->get_logger(), "Age: %f, Gender: %s", people->persons.agegenders[i].age,
+ people->persons.agegenders[i].gender.c_str());
RCLCPP_INFO(node->get_logger(), "Yaw, Pitch and Roll for head pose is: (%f, %f, %f),",
- people->persons.headposes[i].yaw, people->persons.headposes[i].pitch,
- people->persons.headposes[i].roll);
+ people->persons.headposes[i].yaw, people->persons.headposes[i].pitch,
+ people->persons.headposes[i].roll);
}
} else {
RCLCPP_WARN(node->get_logger(), "NO response received!!");
diff --git a/sample/src/image_people_server.cpp b/sample/src/image_people_server.cpp
index c8e0ced8..845eaee9 100644
--- a/sample/src/image_people_server.cpp
+++
b/sample/src/image_people_server.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,40 +12,40 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include
+#include
#include
-#include
+#include
#include
#include
#include
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/services/frame_processing_server.hpp"
-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#include "dynamic_vino_lib/inputs/base_input.hpp"
-#include "dynamic_vino_lib/inputs/image_input.hpp"
-#include "inference_engine.hpp"
-#if(defined(USE_OLD_E_PLUGIN_API))
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/services/frame_processing_server.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#include "openvino_wrapper_lib/inputs/base_input.hpp"
+#include "openvino_wrapper_lib/inputs/image_input.hpp"
+#include "openvino/openvino.hpp"
+#if (defined(USE_OLD_E_PLUGIN_API))
#include
#endif
#include "utility.hpp"
-int main(int argc, char ** argv)
+int main(int argc, char** argv)
{
rclcpp::init(argc, argv);
std::string config_path = getConfigPath(argc, argv);
try {
- auto node = std::make_shared>("service_people_detection", config_path);
+ auto node = std::make_shared>(
+ "service_people_detection", config_path);
rclcpp::spin(node);
- } catch (std::exception & e) {
+ } catch (std::exception& e) {
std::cout << e.what() << std::endl;
} catch (...) {
- std::cout << "[ERROR] [service_people_detection]: " <<
- "exception caught" << std::endl;
+ std::cout << "[ERROR] [service_people_detection]: "
+ << "exception caught" << std::endl;
}
return 0;
diff --git a/sample/src/parameters.cpp b/sample/src/parameters.cpp
index 729633e1..26731712 100644
--- a/sample/src/parameters.cpp
+++ b/sample/src/parameters.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,14 +13,14 @@
// limitations under the License.
/**
-* \brief A sample for vino_param_manager library. This sample performs
-* getting/setting
-* parameters for vino related functions.
-* \file sample/parameters.cpp
-*/
+ * \brief A sample for the vino_param_manager library. This sample performs
+ * getting/setting of
+ * parameters for vino-related functions.
+ * \file sample/parameters.cpp
+ */
-#include
-#include
+#include
+#include
#include
#include
#include
@@ -31,12 +31,12 @@
#include
#include "utility.hpp"
-int main(int argc, char * argv[])
+int main(int argc, char* argv[])
{
try {
// ------Parsing and validation of input args---------
std::string config = getConfigPath(argc, argv);
- if(config.empty()){
+ if (config.empty()) {
throw std::runtime_error("Config File is not correctly set.");
return -1;
}
@@ -46,7 +46,7 @@ int main(int argc, char * argv[])
slog::info << "print again, should same as above....." << slog::endl;
Params::ParamManager::getInstance().print();
- } catch (const std::exception & error) {
+ } catch (const std::exception& error) {
slog::err << error.what() << slog::endl;
return -1;
} catch (...)
{
diff --git a/sample/src/pipeline_composite.cpp b/sample/src/pipeline_composite.cpp
index d37c809f..959f7889 100644
--- a/sample/src/pipeline_composite.cpp
+++ b/sample/src/pipeline_composite.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,14 +13,14 @@
// limitations under the License.
/**
-* \brief The is the composition version making the pipeline management into
-* a rclcpp Node.
-* \file pipeline_composite.cpp
-*/
+ * \brief This is the composition version making the pipeline management into
+ * a rclcpp Node.
+ * \file pipeline_composite.cpp
+ */
#include
#include
-#include
+#include
#include
#include
#include
@@ -37,13 +37,13 @@
#include
#include
-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#if(defined(USE_OLD_E_PLUGIN_API))
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#if (defined(USE_OLD_E_PLUGIN_API))
#include
#endif
-#include "inference_engine.hpp"
+#include "openvino/openvino.hpp"
#include "librealsense2/rs.hpp"
#include "opencv2/opencv.hpp"
//#include "utility.hpp"
@@ -61,8 +61,8 @@ void signalHandler(int signum)
class ComposablePipeline : public rclcpp::Node
{
public:
- ComposablePipeline(const rclcpp::NodeOptions & node_options=rclcpp::NodeOptions())
- : rclcpp::Node("composable_pipeline", "/", node_options)
+ ComposablePipeline(const rclcpp::NodeOptions& node_options = rclcpp::NodeOptions())
+ : rclcpp::Node("composable_pipeline", "/", node_options)
{
initPipeline();
}
@@ -85,20 +85,19 @@ class ComposablePipeline : public rclcpp::Node
}
std::shared_ptr node_handler(this);
- // auto createPipeline = PipelineManager::getInstance().createPipeline;
- for (auto & p : pipelines) {
+ for (auto& p : pipelines) {
PipelineManager::getInstance().createPipeline(p, node_handler);
}
PipelineManager::getInstance().runAll();
- //PipelineManager::getInstance().joinAll();
}
std::string getConfigPath()
{
- return rclcpp::Node::declare_parameter("config");
+ // TODO: Fix API for Humble
+ // return declare_parameter("config").get();
+ return "";
}
-
};
#include "rclcpp_components/register_node_macro.hpp"
diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp
index 7bfa8ca8..71a85b0d 100644
--- a/sample/src/pipeline_with_params.cpp
+++ b/sample/src/pipeline_with_params.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,14 +13,14 @@
// limitations under the License.
/**
-* \brief A sample for this library. This sample performs face detection,
+ * emotions detection, age gender detection and head pose estimation.
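+ * The pipeline graph itself (inputs, infers, outputs, connects) is read from
+ * the YAML file passed via the -config argument.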
-* \file sample/pipeline_manager.cpp
-*/
+ * \file sample/pipeline_with_params.cpp
+ */
#include
#include
-#include
+#include
#include
#include
#include
@@ -37,14 +37,14 @@
#include
#include
-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/services/pipeline_processing_server.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#if(defined(USE_OLD_E_PLUGIN_API))
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#if (defined(USE_OLD_E_PLUGIN_API))
#include
#endif
-#include "inference_engine.hpp"
+#include "openvino/openvino.hpp"
#include "librealsense2/rs.hpp"
#include "opencv2/opencv.hpp"
#include "utility.hpp"
@@ -59,22 +59,22 @@ void signalHandler(int signum)
// exit(signum);
}
-int main(int argc, char * argv[])
+int main(int argc, char* argv[])
{
rclcpp::init(argc, argv);
rclcpp::executors::SingleThreadedExecutor exec;
rclcpp::Node::SharedPtr main_node = rclcpp::Node::make_shared("openvino_pipeline");
- rclcpp::Node::SharedPtr service_node = std::make_shared>("pipeline_service");
+ rclcpp::Node::SharedPtr service_node =
+ std::make_shared>("pipeline_service");
// register signal SIGINT and signal handler
- //signal(SIGINT, signalHandler);
+ // signal(SIGINT, signalHandler);
try {
- std::cout << "InferenceEngine: " << InferenceEngine::GetInferenceEngineVersion() << std::endl;
+ std::cout << "OpenVINO: " << ov::get_openvino_version() << std::endl;
// ----- Parsing and validation of input args-----------------------
std::string config = getConfigPath(argc, argv);
- if(config.empty()){
+ if (config.empty()) {
throw std::runtime_error("Config File is not correctly set.");
return -1;
}
@@ -86,21 +86,20 @@ int main(int argc, char * argv[])
if (pipelines.size() < 1) {
throw std::logic_error("Pipeline parameters should be set!");
}
- // auto createPipeline = PipelineManager::getInstance().createPipeline;
- for (auto & p : pipelines) {
+ for (auto& p : pipelines) {
PipelineManager::getInstance().createPipeline(p, main_node);
}
PipelineManager::getInstance().runAll();
- //rclcpp::spin(main_node);
+ // rclcpp::spin(main_node);
exec.add_node(main_node);
exec.add_node(service_node);
exec.spin();
PipelineManager::getInstance().stopAll();
rclcpp::shutdown();
- } catch (const std::exception & error) {
+ } catch (const std::exception& error) {
slog::err << error.what() << slog::endl;
return -2;
} catch (...)
{ diff --git a/script/viewer/service.py b/script/viewer/service.py index d0f99702..82d7d2e4 100644 --- a/script/viewer/service.py +++ b/script/viewer/service.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -from pipeline_srv_msgs.srv import * +from openvino_msgs.srv import * import rclpy import sys from pipeTree import TreeNode diff --git a/script/viewer/viewer.py b/script/viewer/viewer.py index 93b71918..a82ace37 100644 --- a/script/viewer/viewer.py +++ b/script/viewer/viewer.py @@ -3,7 +3,7 @@ from PyQt5.QtGui import QPainter,QPen,QBrush,QColor from PyQt5.QtCore import QRect from service import reqPipelineService,getTree -from pipeline_srv_msgs.srv import * +from openvino_msgs.srv import * from pipeTree import TreeNode diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 59509625..f6c1fde6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,22 +14,22 @@ cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_test) +project(openvino_test) list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### -message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") -find_package(InferenceEngine) -if(NOT InferenceEngine_FOUND) +find_package(OpenVINO) +if(NOT OpenVINO_FOUND) message(FATAL_ERROR "") endif() +set(OpenVINO_LIBRARIES openvino::runtime) # Find OpenCV libray if exists find_package(OpenCV REQUIRED) @@ -51,12 +51,11 @@ find_package(rmw REQUIRED) find_package(std_msgs REQUIRED) find_package(sensor_msgs REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs REQUIRED) +find_package(openvino_msgs REQUIRED) find_package(class_loader REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) -find_package(dynamic_vino_lib REQUIRED) +find_package(openvino_wrapper_lib REQUIRED) set(CMAKE_BUILD_TYPE "Release") if("${CMAKE_BUILD_TYPE}" STREQUAL "") @@ -119,10 +118,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-deprecated-de include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader ${CMAKE_CURRENT_SOURCE_DIR}/include - ${InferenceEngine_INCLUDE_DIRS} - ${InferenceEngine_INCLUDE_DIRS}/../samples - ${InferenceEngine_INCLUDE_DIRS}/../samples/extention - ${InferenceEngine_INCLUDE_DIRS}/../src + ${OpenVINO_DIRS} ${realsense2_INCLUDE_DIRS} ) @@ -147,17 +143,16 @@ if(BUILD_TESTING) if(TARGET ${target}) ament_target_dependencies(${target} "rclcpp" - "vino_param_lib" + "openvino_param_lib" "object_msgs" - "people_msgs" - "pipeline_srv_msgs" - "InferenceEngine" - "OpenCV" - "realsense2" + "openvino_msgs" + "OpenVINO" + "OpenCV" + "realsense2" "ament_index_cpp" "yaml_cpp_vendor" "class_loader" - "dynamic_vino_lib") + "openvino_wrapper_lib") endif() endmacro() diff --git a/tests/launch/image_object_service_test.launch.py b/tests/launch/image_object_service_test.launch.py index aa4e85e7..9b8ecb32 100644 --- a/tests/launch/image_object_service_test.launch.py +++ 
b/tests/launch/image_object_service_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'image_object_service_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_object_server', + package='openvino_node', node_executable='image_object_server', arguments=['-config', default_yaml], output='screen'), ]) diff --git a/tests/launch/image_people_service_test.launch.py b/tests/launch/image_people_service_test.launch.py index 6db0d65f..a9519dd4 100644 --- a/tests/launch/image_people_service_test.launch.py +++ b/tests/launch/image_people_service_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'image_people_service_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_people_server', + package='openvino_node', node_executable='image_people_server', arguments=['-config', default_yaml], output='screen'), ]) diff --git a/tests/launch/pipeline_face_reidentification_test.launch.py b/tests/launch/pipeline_face_reidentification_test.launch.py index cd4b0844..9515a86e 100644 --- a/tests/launch/pipeline_face_reidentification_test.launch.py +++ b/tests/launch/pipeline_face_reidentification_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_face_reidentification_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', '/ros2_openvino_toolkit/face_detection'), diff --git a/tests/launch/pipeline_face_test.launch.py b/tests/launch/pipeline_face_test.launch.py index 9c08bd36..2311342e 100644 --- a/tests/launch/pipeline_face_test.launch.py +++ b/tests/launch/pipeline_face_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_face_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/tests/launch/pipeline_image_test.launch.py b/tests/launch/pipeline_image_test.launch.py index 947667d1..e4a2b738 100644 --- a/tests/launch/pipeline_image_test.launch.py +++ b/tests/launch/pipeline_image_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 
'pipeline_image_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/tests/launch/pipeline_object_test.launch.py b/tests/launch/pipeline_object_test.launch.py index b324415d..752080c7 100644 --- a/tests/launch/pipeline_object_test.launch.py +++ b/tests/launch/pipeline_object_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_object_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/tests/launch/pipeline_reidentification_test.launch.py b/tests/launch/pipeline_reidentification_test.launch.py index b29f0316..080d619c 100644 --- a/tests/launch/pipeline_reidentification_test.launch.py +++ b/tests/launch/pipeline_reidentification_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_reidentification_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/tests/launch/pipeline_segmentation_test.launch.py b/tests/launch/pipeline_segmentation_test.launch.py index 613e5747..5cceb3eb 100644 --- a/tests/launch/pipeline_segmentation_test.launch.py +++ b/tests/launch/pipeline_segmentation_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_segmentation_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/segmentation/segmented_obejcts', diff --git a/tests/launch/pipeline_vehicle_detection_test.launch.py b/tests/launch/pipeline_vehicle_detection_test.launch.py index b039ec8d..deb7cda3 100644 --- a/tests/launch/pipeline_vehicle_detection_test.launch.py +++ b/tests/launch/pipeline_vehicle_detection_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_vehicle_detection_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', 
node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_license_plates', diff --git a/tests/package.xml b/tests/package.xml index 6a0e4085..53577f50 100644 --- a/tests/package.xml +++ b/tests/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_test + openvino_test 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -35,12 +35,11 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 openvino_common - dynamic_vino_lib + openvino_wrapper_lib rosidl_default_runtime builtin_interfaces @@ -53,11 +52,10 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 - dynamic_vino_lib + openvino_wrapper_lib ament_lint_auto ament_lint_common diff --git a/tests/param/image_object_service_test.yaml b/tests/param/image_object_service_test.yaml index 9a1ffd0b..baea4479 100644 --- a/tests/param/image_object_service_test.yaml +++ b/tests/param/image_object_service_test.yaml @@ -1,10 +1,10 @@ Pipelines: - name: object inputs: [Image] - input_path: "/opt/openvino_toolkit/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/car_vihecle.png" + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/image_people_service_test.yaml b/tests/param/image_people_service_test.yaml index ec579426..40f6513e 100644 --- a/tests/param/image_people_service_test.yaml +++ b/tests/param/image_people_service_test.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 16 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: 
/opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService] - left: HeadPoseEstimation right: [RosService] - input_path: "/home/intel/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/team.png" Common: diff --git a/tests/param/pipeline_anormal.yaml b/tests/param/pipeline_anormal.yaml index 8f5f5146..2e3a4214 100644 --- a/tests/param/pipeline_anormal.yaml +++ b/tests/param/pipeline_anormal.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: Objectdetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 @@ -21,7 +21,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_face_reid_video.yaml b/tests/param/pipeline_face_reid_video.yaml index 517178a3..82986615 100644 --- a/tests/param/pipeline_face_reid_video.yaml +++ b/tests/param/pipeline_face_reid_video.yaml @@ -1,22 +1,22 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/face_reid.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_face_reidentification_test.yaml b/tests/param/pipeline_face_reidentification_test.yaml index 6313811a..54ee42ee 100644 --- a/tests/param/pipeline_face_reidentification_test.yaml +++ b/tests/param/pipeline_face_reidentification_test.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: 
FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_face_test.yaml b/tests/param/pipeline_face_test.yaml index 3aca2024..68b395c9 100644 --- a/tests/param/pipeline_face_test.yaml +++ b/tests/param/pipeline_face_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/people_detection.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: 
to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_image_test.yaml b/tests/param/pipeline_image_test.yaml index 50540acd..074cbd22 100644 --- a/tests/param/pipeline_image_test.yaml +++ b/tests/param/pipeline_image_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_object_test.yaml b/tests/param/pipeline_object_test.yaml index c45999ec..542d3142 100644 --- a/tests/param/pipeline_object_test.yaml +++ b/tests/param/pipeline_object_test.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_object_yolo_test.yaml b/tests/param/pipeline_object_yolo_test.yaml index dfdbe15d..35fc06c9 100644 --- a/tests/param/pipeline_object_yolo_test.yaml +++ b/tests/param/pipeline_object_yolo_test.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: GPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_reidentification_test.yaml b/tests/param/pipeline_reidentification_test.yaml index 9f854572..527742fe 100644 --- 
a/tests/param/pipeline_reidentification_test.yaml +++ b/tests/param/pipeline_reidentification_test.yaml @@ -1,17 +1,17 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/people_reid.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_segmentation_test.yaml b/tests/param/pipeline_segmentation_test.yaml index 74d22368..7ba13e01 100644 --- a/tests/param/pipeline_segmentation_test.yaml +++ b/tests/param/pipeline_segmentation_test.yaml @@ -1,7 +1,7 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/segmentation.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation model: /opt/openvino_toolkit/models/segmentation/output/FP16/frozen_inference_graph.xml diff --git a/tests/param/pipeline_vehicle_detection_test.yaml b/tests/param/pipeline_vehicle_detection_test.yaml index d425a36d..ae4c173b 100644 --- a/tests/param/pipeline_vehicle_detection_test.yaml +++ b/tests/param/pipeline_vehicle_detection_test.yaml @@ -1,21 +1,21 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/vehicle_detection.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: 
/opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/src/lib/unittest_createPipelineCheck.cpp b/tests/src/lib/unittest_createPipelineCheck.cpp index 2e48fb34..08b656d2 100644 --- a/tests/src/lib/unittest_createPipelineCheck.cpp +++ b/tests/src/lib/unittest_createPipelineCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -32,27 +32,30 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" std::string getConfigPath(std::string config_file) { std::string content; std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_test", content, &prefix_path); - return prefix_path + "/share/dynamic_vino_test/param/" + config_file; + ament_index_cpp::get_resource("packages", "openvino_test", content, &prefix_path); + return prefix_path + "/share/openvino_test/param/" + config_file; } TEST(UnitTestCheckPipeline, testCreatePipeline) { auto node = rclcpp::Node::make_shared("pipeline_test"); - std::vector config_files = {"image_object_service_test.yaml", "pipeline_face_reid_video.yaml", - "pipeline_image_test.yaml", "pipeline_reidentification_test.yaml", - "pipeline_vehicle_detection_test.yaml", "image_people_service_test.yaml", - "pipeline_segmentation_test.yaml", "pipeline_face_test.yaml"}; - for (unsigned int i = 0; i < config_files.size(); i++) - { + std::vector config_files = { "image_object_service_test.yaml", + "pipeline_face_reid_video.yaml", + "pipeline_image_test.yaml", + "pipeline_reidentification_test.yaml", + "pipeline_vehicle_detection_test.yaml", + "image_people_service_test.yaml", + "pipeline_segmentation_test.yaml", + "pipeline_face_test.yaml" }; + for (unsigned int i = 0; i < config_files.size(); i++) { std::string config_file = getConfigPath(config_files[i]); EXPECT_TRUE(std::ifstream(config_file).is_open()); ASSERT_NO_THROW({ @@ -60,7 +63,7 @@ TEST(UnitTestCheckPipeline, testCreatePipeline) auto pipelines = Params::ParamManager::getInstance().getPipelines(); EXPECT_GT(pipelines.size(), 0); - for (auto & p : pipelines) { + for (auto& p : pipelines) { PipelineManager::getInstance().createPipeline(p, node); } }); @@ -78,7 +81,7 @@ TEST(UnitTestCheckPipeline, testCreatePipelineRealsense) auto pipelines = Params::ParamManager::getInstance().getPipelines(); EXPECT_GT(pipelines.size(), 0); - for (auto & p : pipelines) { + for (auto& p : pipelines) { PipelineManager::getInstance().createPipeline(p, node); } }); @@ -86,24 +89,23 @@ TEST(UnitTestCheckPipeline, testCreatePipelineRealsense) TEST(UnitTestCheckPipeline, testPipelineIncorrectConfig) { - auto node = rclcpp::Node::make_shared("pipeline_anormal_test"); + auto node = rclcpp::Node::make_shared("pipeline_anormal_test"); std::string config_file = getConfigPath("pipeline_anormal.yaml"); EXPECT_TRUE(std::ifstream(config_file).is_open()); - try{ + try { Params::ParamManager::getInstance().parse(config_file); auto pipelines = 
Params::ParamManager::getInstance().getPipelines(); EXPECT_GT(pipelines.size(), 0); - for (auto & p : pipelines) { - PipelineManager::getInstance().createPipeline(p,node); + for (auto& p : pipelines) { + PipelineManager::getInstance().createPipeline(p, node); } - } - catch (...) { + } catch (...) { SUCCEED(); } } -int main(int argc, char * argv[]) +int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); diff --git a/tests/src/service/unittest_objectService.cpp b/tests/src/service/unittest_objectService.cpp index 90b66a12..31f4fac8 100644 --- a/tests/src/service/unittest_objectService.cpp +++ b/tests/src/service/unittest_objectService.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Intel Corporation. All Rights Reserved +// Copyright (c) 2017-2022 Intel Corporation. All Rights Reserved // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ // limitations under the License. #include -#include +#include #include #include #include @@ -22,7 +22,7 @@ #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" std::string generate_file_path(std::string path) { @@ -39,7 +39,7 @@ TEST(UnitTestObject, testObject) auto client = node->create_client("/openvino_toolkit/service"); ASSERT_TRUE(client->wait_for_service(std::chrono::seconds(20))); - + auto request = std::make_shared(); std::string buffer = generate_file_path("data/images/car_vihecle.png"); @@ -48,9 +48,7 @@ TEST(UnitTestObject, testObject) auto result = client->async_send_request(request); - ASSERT_EQ( - rclcpp::FutureReturnCode::SUCCESS, - rclcpp::spin_until_future_complete(node, result)); + ASSERT_EQ(rclcpp::FutureReturnCode::SUCCESS, rclcpp::spin_until_future_complete(node, result)); auto srv = result.get(); @@ -61,25 +59,20 @@ TEST(UnitTestObject, testObject) } EXPECT_TRUE(srv->objects.objects_vector[0].roi.x_offset > 1080 && - srv->objects.objects_vector[0].roi.x_offset < 1720 && - srv->objects.objects_vector[0].roi.y_offset > 215 && - srv->objects.objects_vector[0].roi.y_offset < 480); - EXPECT_TRUE(srv->objects.objects_vector[1].roi.x_offset > 310 && - srv->objects.objects_vector[1].roi.x_offset < 785 && - srv->objects.objects_vector[1].roi.y_offset > 225 && - srv->objects.objects_vector[1].roi.y_offset < 460); - EXPECT_TRUE(srv->objects.objects_vector[2].roi.x_offset > 195 && - srv->objects.objects_vector[2].roi.x_offset < 405 && - srv->objects.objects_vector[2].roi.y_offset > 220 && - srv->objects.objects_vector[2].roi.y_offset < 345); + srv->objects.objects_vector[0].roi.x_offset < 1720 && srv->objects.objects_vector[0].roi.y_offset > 215 && + srv->objects.objects_vector[0].roi.y_offset < 480); + EXPECT_TRUE(srv->objects.objects_vector[1].roi.x_offset > 310 && srv->objects.objects_vector[1].roi.x_offset < 785 && + srv->objects.objects_vector[1].roi.y_offset > 225 && srv->objects.objects_vector[1].roi.y_offset < 460); + EXPECT_TRUE(srv->objects.objects_vector[2].roi.x_offset > 195 && srv->objects.objects_vector[2].roi.x_offset < 405 && + srv->objects.objects_vector[2].roi.y_offset > 220 && srv->objects.objects_vector[2].roi.y_offset < 345); } -int main(int argc, char ** argv) +int main(int argc, char** argv) { rclcpp::init(argc, argv); testing::InitGoogleTest(&argc, argv); auto offset = std::chrono::seconds(20); - system("ros2 launch dynamic_vino_test image_object_service_test.launch.py &"); + 
system("ros2 launch openvino_test image_object_service_test.launch.py &"); rclcpp::sleep_for(offset); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT image_object_server &"); diff --git a/tests/src/service/unittest_peopleService.cpp b/tests/src/service/unittest_peopleService.cpp index 7e944ecc..69ff2bb2 100644 --- a/tests/src/service/unittest_peopleService.cpp +++ b/tests/src/service/unittest_peopleService.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Intel Corporation. All Rights Reserved +// Copyright (c) 2017-2022 Intel Corporation. All Rights Reserved // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include +#include +#include #include -#include +#include #include #include #include @@ -24,7 +24,7 @@ #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" std::string generate_file_path(std::string path) { @@ -38,11 +38,11 @@ TEST(UnitTestPeople, testPeople) { auto node = rclcpp::Node::make_shared("openvino_people_service_test"); - auto client = node->create_client("/openvino_toolkit/service"); - + auto client = node->create_client("/openvino_toolkit/service"); + ASSERT_TRUE(client->wait_for_service(std::chrono::seconds(20))); - - auto request = std::make_shared(); + + auto request = std::make_shared(); std::string buffer = generate_file_path("data/images/team.jpg"); std::cout << buffer << std::endl; @@ -50,9 +50,7 @@ TEST(UnitTestPeople, testPeople) auto result = client->async_send_request(request); - ASSERT_EQ( - rclcpp::FutureReturnCode::SUCCESS, - rclcpp::spin_until_future_complete(node, result)); + ASSERT_EQ(rclcpp::FutureReturnCode::SUCCESS, rclcpp::spin_until_future_complete(node, result)); auto srv = result.get(); @@ -66,12 +64,12 @@ TEST(UnitTestPeople, testPeople) } } -int main(int argc, char ** argv) +int main(int argc, char** argv) { rclcpp::init(argc, argv); testing::InitGoogleTest(&argc, argv); auto offset = std::chrono::seconds(20); - system("ros2 launch dynamic_vino_test image_people_service_test.launch.py &"); + system("ros2 launch openvino_test image_people_service_test.launch.py &"); rclcpp::sleep_for(offset); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT image_people_server &"); diff --git a/tests/src/topic/unittest_faceDetectionCheck.cpp b/tests/src/topic/unittest_faceDetectionCheck.cpp index 95fc2118..4ff23634 100644 --- a/tests/src/topic/unittest_faceDetectionCheck.cpp +++ b/tests/src/topic/unittest_faceDetectionCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,13 +14,13 @@ #include #include -#include -#include +#include +#include #include -#include -#include +#include +#include #include -#include +#include #include #include @@ -37,10 +37,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -49,21 +49,18 @@ static bool emotion_test_pass = false; static bool ageGender_test_pass = false; static bool headPose_test_pass = false; -template -void wait_for_future( - rclcpp::Executor & executor, std::shared_future & future, - const DurationT & timeout) +template +void wait_for_future(rclcpp::Executor& executor, std::shared_future& future, const DurationT& timeout) { using rclcpp::FutureReturnCode; rclcpp::FutureReturnCode future_ret; auto start_time = std::chrono::steady_clock::now(); future_ret = executor.spin_until_future_complete(future, timeout); auto elapsed_time = std::chrono::steady_clock::now() - start_time; - EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << - "the usb camera don't publish data to topic\n" << - "future failed to be set after: " << - std::chrono::duration_cast(elapsed_time).count() << - " milliseconds\n"; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) + << "the usb camera don't publish data to topic\n" + << "future failed to be set after: " + << std::chrono::duration_cast(elapsed_time).count() << " milliseconds\n"; } TEST(UnitTestFaceDetection, testFaceDetection) @@ -73,18 +70,17 @@ TEST(UnitTestFaceDetection, testFaceDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_faceDetection_callback = - [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { - face_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_faceDetection_callback = [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + face_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/face_detection", qos, openvino_faceDetection_callback); + auto sub1 = node->create_subscription("/ros2_openvino_toolkit/face_detection", + qos, openvino_faceDetection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -102,17 +98,17 @@ TEST(UnitTestFaceDetection, testEmotionDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_emotionRecognition_callback = - [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { - emotion_test_pass = true; - sub_called.set_value(true); - }; + [&sub_called](const object_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + emotion_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub2 = node->create_subscription( - "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); + auto sub2 = node->create_subscription( + "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); executor.spin_once(std::chrono::seconds(0)); @@ -129,18 +125,17 @@ TEST(UnitTestFaceDetection, testageGenderDetection) 
std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_ageGender_callback = - [&sub_called](const people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { - ageGender_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_ageGender_callback = [&sub_called](const object_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + ageGender_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub3 = node->create_subscription( - "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); + auto sub3 = node->create_subscription( + "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); executor.spin_once(std::chrono::seconds(0)); @@ -157,18 +152,17 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_headPose_callback = - [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { - headPose_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_headPose_callback = [&sub_called](const object_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + headPose_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub4 = node->create_subscription( - "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); + auto sub4 = node->create_subscription( + "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); executor.spin_once(std::chrono::seconds(0)); @@ -178,12 +172,12 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) } } -int main(int argc, char * argv[]) +int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_face_test.launch.py &"); + system("ros2 launch openvino_test pipeline_face_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_face_reidentification.cpp b/tests/src/topic/unittest_face_reidentification.cpp index 395e1bb4..1e8fd402 100644 --- a/tests/src/topic/unittest_face_reidentification.cpp +++ b/tests/src/topic/unittest_face_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
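unittest_faceDetectionCheck.cpp above, and the topic tests that follow, all reformat the same wait_for_future helper. A sketch of it in its post-patch shape, assuming the promise and future carry bool, which matches the set_value(true) calls in the callbacks:

```cpp
#include <chrono>
#include <future>
#include <gtest/gtest.h>
#include <rclcpp/rclcpp.hpp>

// Spin the executor until the future is set or the timeout expires; on
// failure, report how long the test waited for a message.
template <typename DurationT>
void wait_for_future(rclcpp::Executor& executor, std::shared_future<bool>& future, const DurationT& timeout)
{
  auto start_time = std::chrono::steady_clock::now();
  rclcpp::FutureReturnCode future_ret = executor.spin_until_future_complete(future, timeout);
  auto elapsed_time = std::chrono::steady_clock::now() - start_time;
  EXPECT_EQ(rclcpp::FutureReturnCode::SUCCESS, future_ret)
      << "the camera did not publish data to the topic; future failed to be set after "
      << std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() << " milliseconds";
}
```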
@@ -14,12 +14,12 @@ #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#include +#include #include #include @@ -36,10 +36,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -48,21 +48,18 @@ static bool face_detection = false; static bool landmark_detection = false; static bool test_pass = false; -template -void wait_for_future( - rclcpp::Executor & executor, std::shared_future & future, - const DurationT & timeout) +template +void wait_for_future(rclcpp::Executor& executor, std::shared_future& future, const DurationT& timeout) { using rclcpp::FutureReturnCode; rclcpp::FutureReturnCode future_ret; auto start_time = std::chrono::steady_clock::now(); future_ret = executor.spin_until_future_complete(future, timeout); auto elapsed_time = std::chrono::steady_clock::now() - start_time; - EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << - "the usb camera don't publish data to topic\n" << - "future failed to be set after: " << - std::chrono::duration_cast(elapsed_time).count() << - " milliseconds\n"; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) + << "the usb camera don't publish data to topic\n" + << "future failed to be set after: " + << std::chrono::duration_cast(elapsed_time).count() << " milliseconds\n"; } TEST(UnitTestFaceReidentification, testFaceDetection) @@ -72,18 +69,17 @@ TEST(UnitTestFaceReidentification, testFaceDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_face_detection_callback = - [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { - face_detection = true; - sub_called.set_value(true); - }; + auto openvino_face_detection_callback = [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + face_detection = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/face_detection", rclcpp::QoS(1), openvino_face_detection_callback); + "/ros2_openvino_toolkit/face_detection", rclcpp::QoS(1), openvino_face_detection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -100,18 +96,18 @@ TEST(UnitTestFaceReidentification, testLandmarkDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_landmark_detection_callback = - [&sub_called](const people_msgs::msg::LandmarkStamped::SharedPtr msg) -> void { - if(msg->landmarks.size() > 0) - landmark_detection = true; - sub_called.set_value(true); - }; + [&sub_called](const object_msgs::msg::LandmarkStamped::SharedPtr msg) -> void { + if (msg->landmarks.size() > 0) + landmark_detection = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/detected_landmarks", qos, openvino_landmark_detection_callback); + auto sub1 = node->create_subscription( + "/ros2_openvino_toolkit/detected_landmarks", qos, openvino_landmark_detection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -128,18 +124,18 @@ 
TEST(UnitTestFaceReidentification, testReidentification) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_face_reidentification_callback = - [&sub_called](const people_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { - if(msg->reidentified_vector.size() > 0) - test_pass = true; - sub_called.set_value(true); - }; + [&sub_called](const object_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { + if (msg->reidentified_vector.size() > 0) + test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/reidentified_faces", qos, openvino_face_reidentification_callback); + auto sub1 = node->create_subscription( + "/ros2_openvino_toolkit/reidentified_faces", qos, openvino_face_reidentification_callback); executor.spin_once(std::chrono::seconds(0)); @@ -149,12 +145,12 @@ TEST(UnitTestFaceReidentification, testReidentification) } } -int main(int argc, char * argv[]) +int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_face_reidentification_test.launch.py &"); + system("ros2 launch openvino_test pipeline_face_reidentification_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_imageCheck.cpp b/tests/src/topic/unittest_imageCheck.cpp index 7f4a333a..5ef8091f 100644 --- a/tests/src/topic/unittest_imageCheck.cpp +++ b/tests/src/topic/unittest_imageCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
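Each topic test then follows the same shape: subscribe, fulfill a promise from the callback, and wait on the shared future. A sketch of one such case, reusing the wait_for_future helper above; the header path object_msgs/msg/objects_in_boxes.hpp is assumed from the object_msgs::msg::ObjectsInBoxes type named in the callbacks:

```cpp
#include <chrono>
#include <future>
#include <gtest/gtest.h>
#include <rclcpp/rclcpp.hpp>
// Assumed header for the message type named in the callbacks.
#include <object_msgs/msg/objects_in_boxes.hpp>

TEST(UnitTestFaceDetection, testFaceDetectionTopic)
{
  auto node = rclcpp::Node::make_shared("test_face_detection");
  rclcpp::QoS qos(1);

  std::promise<bool> sub_called;
  std::shared_future<bool> sub_called_future(sub_called.get_future());

  // Record that a detection arrived and release the waiting future.
  auto callback = [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr) -> void {
    sub_called.set_value(true);
  };

  rclcpp::executors::SingleThreadedExecutor executor;
  executor.add_node(node);
  {
    auto sub = node->create_subscription<object_msgs::msg::ObjectsInBoxes>(
        "/ros2_openvino_toolkit/face_detection", qos, callback);

    executor.spin_once(std::chrono::seconds(0));
    wait_for_future(executor, sub_called_future, std::chrono::seconds(10));
  }
}
```

The subscription lives in its own scope so it is torn down as soon as the wait returns.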
@@ -14,13 +14,13 @@ #include #include -#include -#include +#include +#include #include -#include -#include +#include +#include #include -#include +#include #include #include @@ -37,10 +37,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -49,21 +49,18 @@ static bool emotion_test_pass = false; static bool ageGender_test_pass = false; static bool headPose_test_pass = false; -template -void wait_for_future( - rclcpp::Executor & executor, std::shared_future & future, - const DurationT & timeout) +template +void wait_for_future(rclcpp::Executor& executor, std::shared_future& future, const DurationT& timeout) { using rclcpp::FutureReturnCode; rclcpp::FutureReturnCode future_ret; auto start_time = std::chrono::steady_clock::now(); future_ret = executor.spin_until_future_complete(future, timeout); auto elapsed_time = std::chrono::steady_clock::now() - start_time; - EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << - "the usb camera don't publish data to topic\n" << - "future failed to be set after: " << - std::chrono::duration_cast(elapsed_time).count() << - " milliseconds\n"; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) + << "the usb camera don't publish data to topic\n" + << "future failed to be set after: " + << std::chrono::duration_cast(elapsed_time).count() << " milliseconds\n"; } TEST(UnitTestFaceDetection, testFaceDetection) @@ -73,18 +70,17 @@ TEST(UnitTestFaceDetection, testFaceDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_faceDetection_callback = - [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { - face_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_faceDetection_callback = [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + face_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/face_detection", qos, openvino_faceDetection_callback); + auto sub1 = node->create_subscription("/ros2_openvino_toolkit/face_detection", + qos, openvino_faceDetection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -102,17 +98,17 @@ TEST(UnitTestFaceDetection, testEmotionDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_emotionRecognition_callback = - [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { - emotion_test_pass = true; - sub_called.set_value(true); - }; + [&sub_called](const object_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + emotion_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub2 = node->create_subscription( - "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); + auto sub2 = node->create_subscription( + "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); executor.spin_once(std::chrono::seconds(0)); @@ -129,18 +125,17 @@ TEST(UnitTestFaceDetection, testageGenderDetection) 
std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_ageGender_callback = - [&sub_called](const people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { - ageGender_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_ageGender_callback = [&sub_called](const object_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + ageGender_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub3 = node->create_subscription( - "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); + auto sub3 = node->create_subscription( + "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); executor.spin_once(std::chrono::seconds(0)); @@ -157,18 +152,17 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_headPose_callback = - [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { - headPose_test_pass = true; - sub_called.set_value(true); - }; + auto openvino_headPose_callback = [&sub_called](const object_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + headPose_test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub4 = node->create_subscription( - "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); + auto sub4 = node->create_subscription( + "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); executor.spin_once(std::chrono::seconds(0)); @@ -178,12 +172,12 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) } } -int main(int argc, char * argv[]) +int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_image_test.launch.py &"); + system("ros2 launch openvino_test pipeline_image_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_objectDetectionCheck.cpp b/tests/src/topic/unittest_objectDetectionCheck.cpp index ad5facf2..8307c9db 100644 --- a/tests/src/topic/unittest_objectDetectionCheck.cpp +++ b/tests/src/topic/unittest_objectDetectionCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
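Across every file in this patch, #include "inference_engine.hpp" becomes #include "openvino/openvino.hpp", and the version banner in pipeline_with_params.cpp switches from InferenceEngine::GetInferenceEngineVersion() to ov::get_openvino_version(). A sketch of the OpenVINO 2.0 entry points involved; the device enumeration at the end is standard ov::Core usage shown for illustration, not part of this patch:

```cpp
#include <iostream>
#include <openvino/openvino.hpp>

int main()
{
  // OpenVINO 2.0: the version lives in the ov namespace and the returned
  // ov::Version is directly streamable.
  // Old API: std::cout << InferenceEngine::GetInferenceEngineVersion();
  std::cout << "OpenVINO: " << ov::get_openvino_version() << std::endl;

  // ov::Core replaces InferenceEngine::Core as the runtime entry point;
  // listing devices just illustrates the new namespace.
  ov::Core core;
  for (const auto& device : core.get_available_devices()) {
    std::cout << "device: " << device << std::endl;
  }
  return 0;
}
```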
@@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -32,30 +32,27 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" static bool test_pass = false; -template -void wait_for_future( - rclcpp::Executor & executor, std::shared_future & future, - const DurationT & timeout) +template +void wait_for_future(rclcpp::Executor& executor, std::shared_future& future, const DurationT& timeout) { using rclcpp::FutureReturnCode; rclcpp::FutureReturnCode future_ret; auto start_time = std::chrono::steady_clock::now(); future_ret = executor.spin_until_future_complete(future, timeout); auto elapsed_time = std::chrono::steady_clock::now() - start_time; - EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << - "the usb camera don't publish data to topic\n" << - "future failed to be set after: " << - std::chrono::duration_cast(elapsed_time).count() << - " milliseconds\n"; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) + << "the usb camera don't publish data to topic\n" + << "future failed to be set after: " + << std::chrono::duration_cast(elapsed_time).count() << " milliseconds\n"; } TEST(UnitTestObjectDetection, testObjectDetection) @@ -65,18 +62,17 @@ TEST(UnitTestObjectDetection, testObjectDetection) std::promise sub_called; std::shared_future sub_called_future(sub_called.get_future()); - auto openvino_faceDetection_callback = - [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { - test_pass = true; - sub_called.set_value(true); - }; + auto openvino_faceDetection_callback = [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + test_pass = true; + sub_called.set_value(true); + }; rclcpp::executors::SingleThreadedExecutor executor; executor.add_node(node); { - auto sub1 = node->create_subscription( - "/ros2_openvino_toolkit/detected_objects", qos, openvino_faceDetection_callback); + auto sub1 = node->create_subscription("/ros2_openvino_toolkit/detected_objects", + qos, openvino_faceDetection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -86,12 +82,12 @@ TEST(UnitTestObjectDetection, testObjectDetection) } } -int main(int argc, char * argv[]) +int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(60); - system("ros2 launch dynamic_vino_test pipeline_object_test.launch.py &"); + system("ros2 launch openvino_test pipeline_object_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_reidentification.cpp b/tests/src/topic/unittest_reidentification.cpp index 05c10b11..abf4857f 100644 --- a/tests/src/topic/unittest_reidentification.cpp +++ b/tests/src/topic/unittest_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
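The test mains, like the one in unittest_objectDetectionCheck.cpp above, drive a whole pipeline from the outside: launch it in the background, run the GoogleTest suite, then SIGINT the pipeline process. A sketch of that driver under one deliberate change: it waits before RUN_ALL_TESTS() so the pipeline is up when the subscriptions start, whereas some of the mains in this patch sleep only after the tests:

```cpp
#include <chrono>
#include <cstdlib>
#include <gtest/gtest.h>
#include <rclcpp/rclcpp.hpp>

int main(int argc, char* argv[])
{
  testing::InitGoogleTest(&argc, argv);
  rclcpp::init(argc, argv);

  // Bring up the pipeline under test in the background and give it time to
  // load its models before the subscriptions start waiting.
  std::system("ros2 launch openvino_test pipeline_object_test.launch.py &");
  rclcpp::sleep_for(std::chrono::seconds(60));

  int ret = RUN_ALL_TESTS();

  // The launch stays in the background, so stop the pipeline explicitly.
  std::system("killall -s SIGINT pipeline_with_params &");
  rclcpp::shutdown();
  return ret;
}
```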
@@ -14,10 +14,10 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
-#include
+#include
 #include
 #include
@@ -34,31 +34,28 @@
 #include
 #include

-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#include "inference_engine.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#include "openvino/openvino.hpp"
 #include "librealsense2/rs.hpp"
 #include "opencv2/opencv.hpp"

 #define MAX_SIZE 300

 static bool test_pass = false;

-template<typename DurationT>
-void wait_for_future(
-  rclcpp::Executor & executor, std::shared_future<bool> & future,
-  const DurationT & timeout)
+template <typename DurationT>
+void wait_for_future(rclcpp::Executor& executor, std::shared_future<bool>& future, const DurationT& timeout)
 {
   using rclcpp::FutureReturnCode;
   rclcpp::FutureReturnCode future_ret;
   auto start_time = std::chrono::steady_clock::now();
   future_ret = executor.spin_until_future_complete(future, timeout);
   auto elapsed_time = std::chrono::steady_clock::now() - start_time;
-  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) <<
-    "the usb camera don't publish data to topic\n" <<
-    "future failed to be set after: " <<
-    std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() <<
-    " milliseconds\n";
+  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret)
+      << "the usb camera don't publish data to topic\n"
+      << "future failed to be set after: "
+      << std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() << " milliseconds\n";
 }

 TEST(UnitTestPersonReidentification, testReidentification)
@@ -69,17 +66,17 @@
   std::shared_future<bool> sub_called_future(sub_called.get_future());

   auto openvino_reidentification_callback =
-    [&sub_called](const people_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void {
-      test_pass = true;
-      sub_called.set_value(true);
-    };
+      [&sub_called](const object_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void {
+        test_pass = true;
+        sub_called.set_value(true);
+      };

   rclcpp::executors::SingleThreadedExecutor executor;
   executor.add_node(node);

   {
-    auto sub1 = node->create_subscription<people_msgs::msg::ReidentificationStamped>(
-      "/ros2_openvino_toolkit/reidentified_persons", qos, openvino_reidentification_callback);
+    auto sub1 = node->create_subscription<object_msgs::msg::ReidentificationStamped>(
+      "/ros2_openvino_toolkit/reidentified_persons", qos, openvino_reidentification_callback);

     executor.spin_once(std::chrono::seconds(0));
@@ -89,12 +86,12 @@
   }
 }

-int main(int argc, char * argv[])
+int main(int argc, char* argv[])
 {
   testing::InitGoogleTest(&argc, argv);
   rclcpp::init(argc, argv);
   auto offset = std::chrono::seconds(30);
-  system("ros2 launch dynamic_vino_test pipeline_reidentification_test.launch.py &");
+  system("ros2 launch openvino_test pipeline_reidentification_test.launch.py &");
   int ret = RUN_ALL_TESTS();
   system("killall -s SIGINT pipeline_with_params &");
   rclcpp::shutdown();
diff --git a/tests/src/topic/unittest_segmentationCheck.cpp b/tests/src/topic/unittest_segmentationCheck.cpp
index 52d6e278..68996709 100644
--- a/tests/src/topic/unittest_segmentationCheck.cpp
+++ b/tests/src/topic/unittest_segmentationCheck.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,9 +14,9 @@
 #include
 #include
-#include
+#include
 #include
-#include
+#include
 #include
 #include
@@ -33,30 +33,27 @@
 #include
 #include

-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#include "inference_engine.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#include "openvino/openvino.hpp"
 #include "librealsense2/rs.hpp"
 #include "opencv2/opencv.hpp"

 static bool test_pass = false;

-template<typename DurationT>
-void wait_for_future(
-  rclcpp::Executor & executor, std::shared_future<bool> & future,
-  const DurationT & timeout)
+template <typename DurationT>
+void wait_for_future(rclcpp::Executor& executor, std::shared_future<bool>& future, const DurationT& timeout)
 {
   using rclcpp::FutureReturnCode;
   rclcpp::FutureReturnCode future_ret;
   auto start_time = std::chrono::steady_clock::now();
   future_ret = executor.spin_until_future_complete(future, timeout);
   auto elapsed_time = std::chrono::steady_clock::now() - start_time;
-  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) <<
-    "the usb camera don't publish data to topic\n" <<
-    "future failed to be set after: " <<
-    std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() <<
-    " milliseconds\n";
+  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret)
+      << "the usb camera don't publish data to topic\n"
+      << "future failed to be set after: "
+      << std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() << " milliseconds\n";
 }

 TEST(UnitTestObjectDetection, testObjectDetection)
@@ -66,18 +63,17 @@
   std::promise<bool> sub_called;
   std::shared_future<bool> sub_called_future(sub_called.get_future());
-  auto openvino_faceDetection_callback =
-    [&sub_called](const people_msgs::msg::ObjectsInMasks::SharedPtr msg) -> void {
-      test_pass = true;
-      sub_called.set_value(true);
-    };
+  auto openvino_faceDetection_callback = [&sub_called](const object_msgs::msg::ObjectsInMasks::SharedPtr msg) -> void {
+    test_pass = true;
+    sub_called.set_value(true);
+  };

   rclcpp::executors::SingleThreadedExecutor executor;
   executor.add_node(node);

   {
-    auto sub1 = node->create_subscription<people_msgs::msg::ObjectsInMasks>(
-      "/ros2_openvino_toolkit/segmented_obejcts", qos, openvino_faceDetection_callback);
+    auto sub1 = node->create_subscription<object_msgs::msg::ObjectsInMasks>("/ros2_openvino_toolkit/segmented_obejcts",
+                                                                            qos, openvino_faceDetection_callback);

     executor.spin_once(std::chrono::seconds(0));
@@ -87,12 +83,12 @@
   }
 }

-int main(int argc, char * argv[])
+int main(int argc, char* argv[])
 {
   testing::InitGoogleTest(&argc, argv);
   rclcpp::init(argc, argv);
   auto offset = std::chrono::seconds(60);
-  system("ros2 launch dynamic_vino_test pipeline_segmentation_test.launch.py &");
+  system("ros2 launch openvino_test pipeline_segmentation_test.launch.py &");
   int ret = RUN_ALL_TESTS();
   rclcpp::sleep_for(offset);
   system("killall -s SIGINT pipeline_with_params &");
diff --git a/tests/src/topic/unittest_vehicleDetectionCheck.cpp b/tests/src/topic/unittest_vehicleDetectionCheck.cpp
index e325ba31..cc37175a 100644
--- a/tests/src/topic/unittest_vehicleDetectionCheck.cpp
+++ b/tests/src/topic/unittest_vehicleDetectionCheck.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2022 Intel Corporation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,12 +14,12 @@
 #include
 #include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
 #include
-#include
+#include
 #include
 #include
@@ -36,31 +36,28 @@
 #include
 #include

-#include "dynamic_vino_lib/pipeline.hpp"
-#include "dynamic_vino_lib/pipeline_manager.hpp"
-#include "dynamic_vino_lib/slog.hpp"
-#include "inference_engine.hpp"
+#include "openvino_wrapper_lib/pipeline.hpp"
+#include "openvino_wrapper_lib/pipeline_manager.hpp"
+#include "openvino_wrapper_lib/slog.hpp"
+#include "openvino/openvino.hpp"
 #include "librealsense2/rs.hpp"
 #include "opencv2/opencv.hpp"

 #define MAX_SIZE 300

 static bool test_pass = false;

-template<typename DurationT>
-void wait_for_future(
-  rclcpp::Executor & executor, std::shared_future<bool> & future,
-  const DurationT & timeout)
+template <typename DurationT>
+void wait_for_future(rclcpp::Executor& executor, std::shared_future<bool>& future, const DurationT& timeout)
 {
   using rclcpp::FutureReturnCode;
   rclcpp::FutureReturnCode future_ret;
   auto start_time = std::chrono::steady_clock::now();
   future_ret = executor.spin_until_future_complete(future, timeout);
   auto elapsed_time = std::chrono::steady_clock::now() - start_time;
-  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) <<
-    "the usb camera don't publish data to topic\n" <<
-    "future failed to be set after: " <<
-    std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() <<
-    " milliseconds\n";
+  EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret)
+      << "the usb camera don't publish data to topic\n"
+      << "future failed to be set after: "
+      << std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count() << " milliseconds\n";
 }

 TEST(UnitTestPersonReidentification, testReidentification)
@@ -70,18 +67,17 @@
   std::promise<bool> sub_called;
   std::shared_future<bool> sub_called_future(sub_called.get_future());
-  auto openvino_vehicle_callback =
-    [&sub_called](const people_msgs::msg::LicensePlateStamped::SharedPtr msg) -> void {
-      test_pass = true;
-      sub_called.set_value(true);
-    };
+  auto openvino_vehicle_callback = [&sub_called](const object_msgs::msg::LicensePlateStamped::SharedPtr msg) -> void {
+    test_pass = true;
+    sub_called.set_value(true);
+  };

   rclcpp::executors::SingleThreadedExecutor executor;
   executor.add_node(node);

   {
-    auto sub1 = node->create_subscription<people_msgs::msg::LicensePlateStamped>(
-      "/ros2_openvino_toolkit/detected_license_plates", qos, openvino_vehicle_callback);
+    auto sub1 = node->create_subscription<object_msgs::msg::LicensePlateStamped>(
+      "/ros2_openvino_toolkit/detected_license_plates", qos, openvino_vehicle_callback);

     executor.spin_once(std::chrono::seconds(0));
@@ -91,12 +87,12 @@
   }
 }

-int main(int argc, char * argv[])
+int main(int argc, char* argv[])
 {
   testing::InitGoogleTest(&argc, argv);
   rclcpp::init(argc, argv);
   auto offset = std::chrono::seconds(30);
-  system("ros2 launch dynamic_vino_test pipeline_vehicle_detection_test.launch.py &");
+  system("ros2 launch openvino_test pipeline_vehicle_detection_test.launch.py &");
   int ret = RUN_ALL_TESTS();
   system("killall -s SIGINT pipeline_with_params &");
   rclcpp::shutdown();
diff --git a/vino_param_lib/include/vino_param_lib/slog.hpp b/vino_param_lib/include/vino_param_lib/slog.hpp
deleted file mode 120000
index f8aaab99..00000000
--- a/vino_param_lib/include/vino_param_lib/slog.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp
\ No newline at end of file