author     zongwave <wei.zong@intel.com>  2019-04-01 09:21:50 +0800
committer  Zong Wei <wei.zong@intel.com>  2019-04-01 18:24:54 +0800
commit     2354fd550437a6a017439ce15e46c1c2aa7ab4a1 (patch)
tree       4fa2fcde16e49d70d8fc58c3a6a7e42edb71821e
parent     cf02947a61da589d5fca5f0d1677a0ab276a88d3 (diff)
download   libxcam-2354fd550437a6a017439ce15e46c1c2aa7ab4a1.tar.gz
dnn: abstract dnn inference engine class and derive object detection class
* clone openvino inference engine
  git clone https://github.com/opencv/dldt.git

* build openvino inference engine
  https://github.com/opencv/dldt/blob/2018/inference-engine/README.md

* export openvino paths to environment variables
  $ export OPENVINO_IE_INC_PATH=${OPENVINO_INFERENCE_ENGINE_DIR}
  $ export OPENVINO_IE_LIBS_PATH=${OPENVINO_INFERENCE_ENGINE_DIR}/bin/intel64/Release/lib
  # NOTE: Fill in the ${OPENVINO_INFERENCE_ENGINE_DIR} - e.g. /home/usr/openvino/dldt/inference-engine/

* test command
  $ ./test-dnn-inference --input cars_on_road.jpg --model-file vehicle-detection-adas-0002.xml
-rw-r--r--  configure.ac                          |   8
-rw-r--r--  modules/dnn/Makefile.am               |  10
-rwxr-xr-x  modules/dnn/dnn_inference_engine.cpp  | 174
-rwxr-xr-x  modules/dnn/dnn_inference_engine.h    |  25
-rw-r--r--  modules/dnn/dnn_object_detection.cpp  | 264
-rw-r--r--  modules/dnn/dnn_object_detection.h    |  53
-rw-r--r--  tests/Makefile.am                     |   3
-rw-r--r--  tests/test-dnn-inference.cpp          |   3
8 files changed, 384 insertions(+), 156 deletions(-)
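
For orientation, a minimal usage sketch of the new class hierarchy, mirroring the change in tests/test-dnn-inference.cpp below. DnnInferConfig fields other than model_filename, and the exact load/run steps, are assumptions drawn from the surrounding code rather than spelled out in this diff:

    #include <vector>
    #include <string>
    #include <dnn/dnn_object_detection.h>

    using namespace XCam;

    void detect_objects ()
    {
        DnnInferConfig infer_config;
        // create_model () derives the .bin weights path from the same prefix as the .xml model
        infer_config.model_filename = "vehicle-detection-adas-0002.xml";

        // instantiate through the derived class; the base class keeps the generic
        // plumbing and declares the model-specific I/O methods pure virtual
        SmartPtr<DnnInferenceEngine> infer_engine = new DnnObjectDetection (infer_config);

        DnnInferInputOutputInfo input_info;
        infer_engine->get_model_input_info (input_info);

        std::vector<std::string> images;
        images.push_back ("cars_on_road.jpg");
        infer_engine->set_inference_data (images);

        // (loading the model and running the inference request are elided;
        //  those steps are not shown in this patch's hunks)
        uint32_t size = 0;
        float* results = (float*) infer_engine->get_inference_results (0, size);
        (void) results;
    }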
diff --git a/configure.ac b/configure.ac
index 0922366..6c8c363 100644
--- a/configure.ac
+++ b/configure.ac
@@ -179,12 +179,12 @@ fi
ENABLE_DNN=0
if test "$enable_dnn" = "yes"; then
- if test -z $OPENVINO_IE_CXXFLAGS; then
- AC_MSG_WARN(Please export OPENVINO_IE_CXXFLAGS environment variables)
+ if test -z $OPENVINO_IE_INC_PATH; then
+ AC_MSG_WARN(Please export the OPENVINO_IE_INC_PATH environment variable)
AC_MSG_ERROR(OpenVino inc path has not been set ... disable DNN module!)
fi
- if test -z $OPENVINO_IE_LIBS; then
- AC_MSG_WARN(Please export OPENVINO_IE_LIBS environment variables)
+ if test -z $OPENVINO_IE_LIBS_PATH; then
+ AC_MSG_WARN(Please export the OPENVINO_IE_LIBS_PATH environment variable)
AC_MSG_ERROR(OpenVino library path has not been set ... disable DNN module!)
fi
ENABLE_DNN=1
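
The checks above require both paths in the environment before configure runs. A typical setup sketch, reusing the exports from the commit message (the --enable-dnn switch is inferred from the $enable_dnn test and is an assumption of this example):

    # point at your dldt inference-engine checkout, e.g. /home/usr/openvino/dldt/inference-engine/
    export OPENVINO_IE_INC_PATH=${OPENVINO_INFERENCE_ENGINE_DIR}
    export OPENVINO_IE_LIBS_PATH=${OPENVINO_INFERENCE_ENGINE_DIR}/bin/intel64/Release/lib
    ./configure --enable-dnn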
diff --git a/modules/dnn/Makefile.am b/modules/dnn/Makefile.am
index b75bc5d..bcad5fd 100644
--- a/modules/dnn/Makefile.am
+++ b/modules/dnn/Makefile.am
@@ -4,12 +4,14 @@ XCAM_DNN_CXXFLAGS = \
$(XCAM_CXXFLAGS) \
-I$(top_srcdir)/xcore \
-I$(top_srcdir)/modules \
- $(OPENVINO_IE_CXXFLAGS) \
+ -I$(OPENVINO_IE_INC_PATH)/include \
+ -I$(OPENVINO_IE_INC_PATH)/src/extension \
+ -I$(OPENVINO_IE_INC_PATH)/samples/common/format_reader \
$(NULL)
XCAM_DNN_LIBS = \
$(top_builddir)/xcore/libxcam_core.la \
- -L$(OPENVINO_IE_LIBS) \
+ -L$(OPENVINO_IE_LIBS_PATH) \
-linference_engine \
-lclDNN64 \
-lclDNNPlugin \
@@ -20,6 +22,7 @@ XCAM_DNN_LIBS = \
xcam_dnn_sources = \
dnn_inference_engine.cpp \
+ dnn_object_detection.cpp \
$(NULL)
libxcam_dnn_la_SOURCES = \
@@ -36,13 +39,14 @@ libxcam_dnn_la_LIBADD = \
libxcam_dnn_la_LDFLAGS = \
$(XCAM_LT_LDFLAGS) \
- -Wl,-rpath,$(OPENVINO_IE_LIBS) \
+ -Wl,-rpath,$(OPENVINO_IE_LIBS_PATH) \
$(NULL)
libxcam_dnnincludedir = $(includedir)/xcam/dnn
nobase_libxcam_dnninclude_HEADERS = \
dnn_inference_engine.h \
+ dnn_object_detection.h \
$(NULL)
libxcam_dnn_la_LIBTOOLFLAGS = --tag=disable-static
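
Since the link line and -Wl,-rpath both point at $(OPENVINO_IE_LIBS_PATH), the OpenVINO runtime libraries must still be present there when the binaries run. If the rpath is insufficient on a given setup, exporting the loader path is a common fallback (an assumption of this note, not something the patch requires):

    export LD_LIBRARY_PATH=${OPENVINO_IE_LIBS_PATH}:${LD_LIBRARY_PATH}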
diff --git a/modules/dnn/dnn_inference_engine.cpp b/modules/dnn/dnn_inference_engine.cpp
index 256b2d8..8453161 100755
--- a/modules/dnn/dnn_inference_engine.cpp
+++ b/modules/dnn/dnn_inference_engine.cpp
@@ -18,7 +18,11 @@
* Author: Zong Wei <wei.zong@intel.com>
*/
+#include <iomanip>
+
#include <format_reader_ptr.h>
+#include <ext_list.hpp>
+
#include "dnn_inference_engine.h"
using namespace std;
@@ -62,10 +66,8 @@ DnnInferenceEngine::create_model (DnnInferConfig& config)
_network_reader.ReadNetwork (get_filename_prefix (config.model_filename) + ".xml");
_network_reader.ReadWeights (get_filename_prefix (config.model_filename) + ".bin");
- // 2. Prepare inputs and outputs format
+ // 2. read network from model
_network = _network_reader.getNetwork ();
- _inputs_info = _network.getInputsInfo ();
- _outputs_info = _network.getOutputsInfo ();
// 3. Select Plugin - Select the plugin on which to load your network.
// 3.1. Create the plugin with the InferenceEngine::PluginDispatcher load helper class.
@@ -241,13 +243,14 @@ DnnInferenceEngine::set_input_presion (uint32_t idx, DnnInferPrecisionType preci
}
uint32_t id = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
- if (idx > _inputs_info.size ()) {
+ if (idx > inputs_info.size ()) {
XCAM_LOG_ERROR ("Input is out of range");
return XCAM_RETURN_ERROR_PARAM;
}
- for (auto & item : _inputs_info) {
+ for (auto & item : inputs_info) {
if (id == idx) {
Precision input_precision = convert_precision_type (precision);
item.second->setPrecision (input_precision);
@@ -268,13 +271,14 @@ DnnInferenceEngine::get_input_presion (uint32_t idx)
}
uint32_t id = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
- if (idx > _inputs_info.size ()) {
+ if (idx > inputs_info.size ()) {
XCAM_LOG_ERROR ("Input is out of range");
return DnnInferPrecisionUnspecified;
}
- /** Iterating over all input blobs **/
- for (auto & item : _inputs_info) {
+
+ for (auto & item : inputs_info) {
if (id == idx) {
Precision input_precision = item.second->getPrecision ();
return convert_precision_type (input_precision);
@@ -293,13 +297,14 @@ DnnInferenceEngine::set_output_presion (uint32_t idx, DnnInferPrecisionType prec
}
uint32_t id = 0;
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
- if (idx > _outputs_info.size ()) {
+ if (idx > outputs_info.size ()) {
XCAM_LOG_ERROR ("Output is out of range");
return XCAM_RETURN_ERROR_PARAM;
}
- for (auto & item : _outputs_info) {
+ for (auto & item : outputs_info) {
if (id == idx) {
Precision output_precision = convert_precision_type (precision);
item.second->setPrecision (output_precision);
@@ -320,13 +325,14 @@ DnnInferenceEngine::get_output_presion (uint32_t idx)
}
uint32_t id = 0;
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
- if (idx > _outputs_info.size ()) {
+ if (idx > outputs_info.size ()) {
XCAM_LOG_ERROR ("Input is out of range");
return DnnInferPrecisionUnspecified;
}
- /** Iterating over all output blobs **/
- for (auto & item : _outputs_info) {
+
+ for (auto & item : outputs_info) {
if (id == idx) {
Precision output_precision = item.second->getPrecision ();
return convert_precision_type (output_precision);
@@ -344,13 +350,14 @@ DnnInferenceEngine::set_input_layout (uint32_t idx, DnnInferLayoutType layout)
return XCAM_RETURN_ERROR_ORDER;
}
uint32_t id = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
- if (idx > _inputs_info.size ()) {
+ if (idx > inputs_info.size ()) {
XCAM_LOG_ERROR ("Input is out of range");
return XCAM_RETURN_ERROR_PARAM;
}
- /** Iterating over all input blobs **/
- for (auto & item : _inputs_info) {
+
+ for (auto & item : inputs_info) {
if (id == idx) {
/** Creating first input blob **/
Layout input_layout = convert_layout_type (layout);
@@ -372,13 +379,14 @@ DnnInferenceEngine::set_output_layout (uint32_t idx, DnnInferLayoutType layout)
}
uint32_t id = 0;
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
- if (idx > _outputs_info.size ()) {
+ if (idx > outputs_info.size ()) {
XCAM_LOG_ERROR ("Output is out of range");
return XCAM_RETURN_ERROR_PARAM;
}
- /** Iterating over all output blobs **/
- for (auto & item : _outputs_info) {
+
+ for (auto & item : outputs_info) {
if (id == idx) {
Layout output_layout = convert_layout_type (layout);
item.second->setLayout (output_layout);
@@ -391,36 +399,6 @@ DnnInferenceEngine::set_output_layout (uint32_t idx, DnnInferLayoutType layout)
}
XCamReturn
-DnnInferenceEngine::get_model_input_info (DnnInferInputOutputInfo& info)
-{
- if (!_model_created) {
- XCAM_LOG_ERROR ("Please create the model firstly!");
- return XCAM_RETURN_ERROR_ORDER;
- }
-
- int id = 0;
- for (auto & item : _inputs_info) {
- auto& input = item.second;
- const InferenceEngine::SizeVector input_dims = input->getDims ();
-
- info.width[id] = input_dims[0];
- info.height[id] = input_dims[1];
- info.channels[id] = input_dims[2];
- info.object_size[id] = input_dims[3];
- info.precision[id] = convert_precision_type (input->getPrecision());
- info.layout[id] = convert_layout_type (input->getLayout());
-
- item.second->setPrecision(Precision::U8);
-
- id++;
- }
- info.batch_size = get_batch_size ();
- info.numbers = _inputs_info.size ();
-
- return XCAM_RETURN_NO_ERROR;
-}
-
-XCamReturn
DnnInferenceEngine::set_model_input_info (DnnInferInputOutputInfo& info)
{
XCAM_LOG_DEBUG ("DnnInferenceEngine::set_model_input_info");
@@ -430,13 +408,15 @@ DnnInferenceEngine::set_model_input_info (DnnInferInputOutputInfo& info)
return XCAM_RETURN_ERROR_ORDER;
}
- if (info.numbers != _inputs_info.size ()) {
+ InputsDataMap inputs_info (_network.getInputsInfo ());
+
+ if (info.numbers != inputs_info.size ()) {
XCAM_LOG_ERROR ("Input size is not matched with model info numbers %d !", info.numbers);
return XCAM_RETURN_ERROR_PARAM;
}
int id = 0;
- for (auto & item : _inputs_info) {
+ for (auto & item : inputs_info) {
Precision precision = convert_precision_type (info.precision[id]);
item.second->setPrecision (precision);
Layout layout = convert_layout_type (info.layout[id]);
@@ -448,43 +428,6 @@ DnnInferenceEngine::set_model_input_info (DnnInferInputOutputInfo& info)
}
XCamReturn
-DnnInferenceEngine::get_model_output_info (DnnInferInputOutputInfo& info)
-{
- if (!_model_created) {
- XCAM_LOG_ERROR ("Please create the model firstly!");
- return XCAM_RETURN_ERROR_ORDER;
- }
-
- int id = 0;
- std::string output_name;
- DataPtr output_info;
- for (const auto& out : _outputs_info) {
- if (out.second->creatorLayer.lock()->type == "DetectionOutput") {
- output_name = out.first;
- output_info = out.second;
- }
- }
- if (output_info.get ()) {
- const InferenceEngine::SizeVector output_dims = output_info->getTensorDesc().getDims();
-
- info.width[id] = output_dims[0];
- info.height[id] = output_dims[1];
- info.channels[id] = output_dims[2];
- info.object_size[id] = output_dims[3];
-
- info.precision[id] = convert_precision_type (output_info->getPrecision());
- info.layout[id] = convert_layout_type (output_info->getLayout());
-
- info.batch_size = 1;
- info.numbers = _outputs_info.size ();
- } else {
- XCAM_LOG_ERROR ("Get output info error!");
- return XCAM_RETURN_ERROR_UNKNOWN;
- }
- return XCAM_RETURN_NO_ERROR;
-}
-
-XCamReturn
DnnInferenceEngine::set_model_output_info (DnnInferInputOutputInfo& info)
{
if (!_model_created) {
@@ -492,13 +435,15 @@ DnnInferenceEngine::set_model_output_info (DnnInferInputOutputInfo& info)
return XCAM_RETURN_ERROR_ORDER;
}
- if (info.numbers != _outputs_info.size()) {
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
+
+ if (info.numbers != outputs_info.size()) {
XCAM_LOG_ERROR ("Output size is not matched with model!");
return XCAM_RETURN_ERROR_PARAM;
}
int id = 0;
- for (auto & item : _outputs_info) {
+ for (auto & item : outputs_info) {
Precision precision = convert_precision_type (info.precision[id]);
item.second->setPrecision (precision);
Layout layout = convert_layout_type (info.layout[id]);
@@ -514,13 +459,14 @@ DnnInferenceEngine::set_input_blob (uint32_t idx, DnnInferData& data)
{
unsigned int id = 0;
std::string item_name;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
- if (idx > _inputs_info.size()) {
+ if (idx > inputs_info.size()) {
XCAM_LOG_ERROR ("Input is out of range");
return XCAM_RETURN_ERROR_PARAM;
}
- for (auto & item : _inputs_info) {
+ for (auto & item : inputs_info) {
if (id == idx) {
item_name = item.first;
break;
@@ -565,6 +511,8 @@ DnnInferenceEngine::set_inference_data (std::vector<std::string> images)
}
uint32_t idx = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
+
for (auto & i : images) {
FormatReader::ReaderPtr reader (i.c_str ());
if (reader.get () == NULL) {
@@ -578,9 +526,9 @@ DnnInferenceEngine::set_inference_data (std::vector<std::string> images)
uint32_t image_width = 0;
uint32_t image_height = 0;
- for (auto & item : _inputs_info) {
- image_width = _inputs_info[item.first]->getDims()[0];
- image_height = _inputs_info[item.first]->getDims()[1];
+ for (auto & item : inputs_info) {
+ image_width = inputs_info[item.first]->getDims()[0];
+ image_height = inputs_info[item.first]->getDims()[1];
}
std::shared_ptr<unsigned char> data (reader->getData (image_width, image_height));
@@ -634,42 +582,6 @@ DnnInferenceEngine::read_inference_image (std::string image)
}
}
-void*
-DnnInferenceEngine::get_inference_results (uint32_t idx, uint32_t& size)
-{
- if (! _model_created || ! _model_loaded) {
- XCAM_LOG_ERROR ("Please create and load the model firstly!");
- return NULL;
- }
- uint32_t id = 0;
- std::string item_name;
-
- if (idx > _outputs_info.size ()) {
- XCAM_LOG_ERROR ("Output is out of range");
- return NULL;
- }
-
- for (auto & item : _outputs_info) {
- if (item.second->creatorLayer.lock()->type == "DetectionOutput") {
- item_name = item.first;
- break;
- }
- id++;
- }
-
- if (item_name.empty ()) {
- XCAM_LOG_ERROR ("item name is empty!");
- return NULL;
- }
-
- const Blob::Ptr blob = _infer_request.GetBlob (item_name);
- float* output_result = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(blob->buffer ());
-
- size = blob->byteSize ();
-
- return (reinterpret_cast<void *>(output_result));
-}
-
InferenceEngine::TargetDevice
DnnInferenceEngine::get_device_from_string (const std::string &device_name)
{
diff --git a/modules/dnn/dnn_inference_engine.h b/modules/dnn/dnn_inference_engine.h
index eebf93d..ed8ed36 100755
--- a/modules/dnn/dnn_inference_engine.h
+++ b/modules/dnn/dnn_inference_engine.h
@@ -25,10 +25,7 @@
#include <vector>
#include <string>
-#include <fstream>
-#include <iomanip>
#include <inference_engine.hpp>
-#include <ext_list.hpp>
#include <xcam_std.h>
@@ -229,14 +226,14 @@ public:
return _input_image_width;
};
- XCamReturn set_model_input_info (DnnInferInputOutputInfo& info);
- XCamReturn get_model_input_info (DnnInferInputOutputInfo& info);
+ virtual XCamReturn set_model_input_info (DnnInferInputOutputInfo& info) = 0;
+ virtual XCamReturn get_model_input_info (DnnInferInputOutputInfo& info) = 0;
- XCamReturn set_model_output_info (DnnInferInputOutputInfo& info);
- XCamReturn get_model_output_info (DnnInferInputOutputInfo& info);
+ virtual XCamReturn set_model_output_info (DnnInferInputOutputInfo& info) = 0;
+ virtual XCamReturn get_model_output_info (DnnInferInputOutputInfo& info) = 0;
- XCamReturn set_inference_data (std::vector<std::string> images);
- void* get_inference_results (uint32_t idx, uint32_t& size);
+ virtual XCamReturn set_inference_data (std::vector<std::string> images) = 0;
+ virtual void* get_inference_results (uint32_t idx, uint32_t& size) = 0;
std::shared_ptr<uint8_t> read_inference_image (std::string image);
void print_log (uint32_t flag);
@@ -257,8 +254,9 @@ protected:
void print_performance_counts (const std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& performance_map);
-private:
XCamReturn set_input_blob (uint32_t idx, DnnInferData& data);
+
+private:
template <typename T> XCamReturn copy_image_to_blob (const DnnInferData& data, InferenceEngine::Blob::Ptr& blob, int batch_index);
template <typename T> XCamReturn copy_data_to_blob (const DnnInferData& data, InferenceEngine::Blob::Ptr& blob, int batch_index);
@@ -267,18 +265,13 @@ protected:
bool _model_created;
bool _model_loaded;
- InferenceEngine::InferencePlugin _plugin;
-
- InferenceEngine::InputsDataMap _inputs_info;
- InferenceEngine::OutputsDataMap _outputs_info;
-
uint32_t _input_image_width;
uint32_t _input_image_height;
+ InferenceEngine::InferencePlugin _plugin;
InferenceEngine::CNNNetReader _network_reader;
InferenceEngine::CNNNetwork _network;
InferenceEngine::InferRequest _infer_request;
-
std::vector<InferenceEngine::CNNLayerPtr> _layers;
};
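
With these six methods now pure virtual, DnnInferenceEngine is abstract and each inference task supplies its own implementation. A skeleton for a hypothetical further subclass (the class name and body are illustrative only, not part of this patch):

    // hypothetical derived engine following the same pattern as DnnObjectDetection
    class DnnFaceDetection
        : public DnnInferenceEngine
    {
    public:
        explicit DnnFaceDetection (DnnInferConfig& config)
            : DnnInferenceEngine (config)
        {
            create_model (config);
        }

        // all six pure virtual methods must be implemented
        virtual XCamReturn set_model_input_info (DnnInferInputOutputInfo& info);
        virtual XCamReturn get_model_input_info (DnnInferInputOutputInfo& info);
        virtual XCamReturn set_model_output_info (DnnInferInputOutputInfo& info);
        virtual XCamReturn get_model_output_info (DnnInferInputOutputInfo& info);
        virtual XCamReturn set_inference_data (std::vector<std::string> images);
        virtual void* get_inference_results (uint32_t idx, uint32_t& size);
    };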
diff --git a/modules/dnn/dnn_object_detection.cpp b/modules/dnn/dnn_object_detection.cpp
new file mode 100644
index 0000000..42bf29d
--- /dev/null
+++ b/modules/dnn/dnn_object_detection.cpp
@@ -0,0 +1,264 @@
+/*
+ * dnn_object_detection.cpp - object detection
+ *
+ * Copyright (c) 2019 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Zong Wei <wei.zong@intel.com>
+ */
+
+#include <format_reader_ptr.h>
+
+#include "dnn_object_detection.h"
+
+using namespace std;
+using namespace InferenceEngine;
+
+namespace XCam {
+
+DnnObjectDetection::DnnObjectDetection (DnnInferConfig& config)
+ : DnnInferenceEngine (config)
+{
+ XCAM_LOG_DEBUG ("DnnObjectDetection::DnnObjectDetection");
+
+ create_model (config);
+}
+
+
+DnnObjectDetection::~DnnObjectDetection ()
+{
+
+}
+
+XCamReturn
+DnnObjectDetection::get_model_input_info (DnnInferInputOutputInfo& info)
+{
+ if (!_model_created) {
+ XCAM_LOG_ERROR ("Please create the model firstly!");
+ return XCAM_RETURN_ERROR_ORDER;
+ }
+
+ int id = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
+
+ for (auto & item : inputs_info) {
+ auto& input = item.second;
+ const InferenceEngine::SizeVector input_dims = input->getDims ();
+
+ info.width[id] = input_dims[0];
+ info.height[id] = input_dims[1];
+ info.channels[id] = input_dims[2];
+ info.object_size[id] = input_dims[3];
+ info.precision[id] = convert_precision_type (input->getPrecision());
+ info.layout[id] = convert_layout_type (input->getLayout());
+
+ item.second->setPrecision(Precision::U8);
+
+ id++;
+ }
+ info.batch_size = get_batch_size ();
+ info.numbers = inputs_info.size ();
+
+ return XCAM_RETURN_NO_ERROR;
+}
+
+XCamReturn
+DnnObjectDetection::set_model_input_info (DnnInferInputOutputInfo& info)
+{
+ XCAM_LOG_DEBUG ("DnnObjectDetection::set_model_input_info");
+
+ if (!_model_created) {
+ XCAM_LOG_ERROR ("Please create the model firstly!");
+ return XCAM_RETURN_ERROR_ORDER;
+ }
+
+ InputsDataMap inputs_info (_network.getInputsInfo ());
+ if (info.numbers != inputs_info.size ()) {
+ XCAM_LOG_ERROR ("Input size is not matched with model info numbers %d !", info.numbers);
+ return XCAM_RETURN_ERROR_PARAM;
+ }
+ int id = 0;
+
+ for (auto & item : inputs_info) {
+ Precision precision = convert_precision_type (info.precision[id]);
+ item.second->setPrecision (precision);
+ Layout layout = convert_layout_type (info.layout[id]);
+ item.second->setLayout (layout);
+ id++;
+ }
+
+ return XCAM_RETURN_NO_ERROR;
+}
+
+XCamReturn
+DnnObjectDetection::get_model_output_info (DnnInferInputOutputInfo& info)
+{
+ if (!_model_created) {
+ XCAM_LOG_ERROR ("Please create the model firstly!");
+ return XCAM_RETURN_ERROR_ORDER;
+ }
+
+ int id = 0;
+ std::string output_name;
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
+ DataPtr output_info;
+ for (const auto& out : outputs_info) {
+ if (out.second->creatorLayer.lock()->type == "DetectionOutput") {
+ output_name = out.first;
+ output_info = out.second;
+ break;
+ }
+ }
+ if (output_info.get ()) {
+ const InferenceEngine::SizeVector output_dims = output_info->getTensorDesc().getDims();
+
+ info.width[id] = output_dims[0];
+ info.height[id] = output_dims[1];
+ info.channels[id] = output_dims[2];
+ info.object_size[id] = output_dims[3];
+
+ info.precision[id] = convert_precision_type (output_info->getPrecision());
+ info.layout[id] = convert_layout_type (output_info->getLayout());
+
+ info.batch_size = 1;
+ info.numbers = outputs_info.size ();
+ } else {
+ XCAM_LOG_ERROR ("Get output info error!");
+ return XCAM_RETURN_ERROR_UNKNOWN;
+ }
+ return XCAM_RETURN_NO_ERROR;
+}
+
+XCamReturn
+DnnObjectDetection::set_model_output_info (DnnInferInputOutputInfo& info)
+{
+ if (!_model_created) {
+ XCAM_LOG_ERROR ("Please create the model firstly!");
+ return XCAM_RETURN_ERROR_ORDER;
+ }
+
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
+ if (info.numbers != outputs_info.size ()) {
+ XCAM_LOG_ERROR ("Output size is not matched with model!");
+ return XCAM_RETURN_ERROR_PARAM;
+ }
+
+ int id = 0;
+ for (auto & item : outputs_info) {
+ Precision precision = convert_precision_type (info.precision[id]);
+ item.second->setPrecision (precision);
+ Layout layout = convert_layout_type (info.layout[id]);
+ item.second->setLayout (layout);
+ id++;
+ }
+
+ return XCAM_RETURN_NO_ERROR;
+}
+
+XCamReturn
+DnnObjectDetection::set_inference_data (std::vector<std::string> images)
+{
+ if (!_model_created) {
+ XCAM_LOG_ERROR ("Please create the model firstly!");
+ return XCAM_RETURN_ERROR_ORDER;
+ }
+
+ uint32_t idx = 0;
+ InputsDataMap inputs_info (_network.getInputsInfo ());
+
+ for (auto & i : images) {
+ FormatReader::ReaderPtr reader (i.c_str ());
+ if (reader.get () == NULL) {
+ XCAM_LOG_WARNING ("Image %d cannot be read!", i);
+ continue;
+ }
+
+ _input_image_width = reader->width ();
+ _input_image_height = reader->height ();
+
+ uint32_t image_width = 0;
+ uint32_t image_height = 0;
+
+ for (auto & item : inputs_info) {
+ image_width = inputs_info[item.first]->getDims ()[0];
+ image_height = inputs_info[item.first]->getDims ()[1];
+ }
+
+ std::shared_ptr<unsigned char> data (reader->getData (image_width, image_height));
+
+ if (data.get () != NULL) {
+ DnnInferData image;
+ image.width = image_width;
+ image.height = image_height;
+ image.width_stride = image_width;
+ image.height_stride = image_height;
+ image.buffer = data.get ();
+ image.channel_num = 3;
+ image.batch_idx = idx;
+ image.image_format = DnnInferImageFormatBGRPacked;
+
+ // set precision & data type
+ image.precision = get_input_presion (idx);
+ image.data_type = DnnInferDataTypeImage;
+
+ set_input_blob (idx, image);
+
+ idx ++;
+ } else {
+ XCAM_LOG_WARNING ("Valid input images were not found!");
+ continue;
+ }
+ }
+
+ return XCAM_RETURN_NO_ERROR;
+}
+
+void*
+DnnObjectDetection::get_inference_results (uint32_t idx, uint32_t& size)
+{
+ if (! _model_created || ! _model_loaded) {
+ XCAM_LOG_ERROR ("Please create and load the model firstly!");
+ return NULL;
+ }
+ uint32_t id = 0;
+ std::string item_name;
+
+ OutputsDataMap outputs_info (_network.getOutputsInfo ());
+ if (idx > outputs_info.size ()) {
+ XCAM_LOG_ERROR ("Output is out of range");
+ return NULL;
+ }
+
+ for (auto & item : outputs_info) {
+ if (item.second->creatorLayer.lock()->type == "DetectionOutput") {
+ item_name = item.first;
+ break;
+ }
+ id++;
+ }
+
+ if (item_name.empty ()) {
+ XCAM_LOG_ERROR ("item name is empty!");
+ return NULL;
+ }
+
+ const Blob::Ptr blob = _infer_request.GetBlob (item_name);
+ float* output_result = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(blob->buffer ());
+
+ size = blob->byteSize ();
+
+ return (reinterpret_cast<void *>(output_result));
+}
+
+} // namespace XCam
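
get_inference_results () hands back the raw FP32 buffer of the DetectionOutput blob. For SSD-style models such as vehicle-detection-adas-0002, that layer conventionally emits 7 floats per detection: [image_id, label, confidence, x_min, y_min, x_max, y_max] with normalized coordinates. That layout is a property of the model, not something this patch enforces; a consumption sketch under that assumption (infer_engine is the detector from the earlier sketch):

    // assumes the standard SSD DetectionOutput layout: 7 floats per detection
    uint32_t byte_size = 0;
    const float* out = (const float*) infer_engine->get_inference_results (0, byte_size);
    uint32_t count = byte_size / sizeof (float) / 7;
    for (uint32_t i = 0; i < count; ++i) {
        const float* det = out + i * 7;
        if (det[0] < 0.0f) break;        // negative image_id marks end of detections
        if (det[2] < 0.5f) continue;     // confidence threshold (illustrative)
        // det[3..6]: normalized [x_min, y_min, x_max, y_max] of the bounding box
    }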
diff --git a/modules/dnn/dnn_object_detection.h b/modules/dnn/dnn_object_detection.h
new file mode 100644
index 0000000..6980858
--- /dev/null
+++ b/modules/dnn/dnn_object_detection.h
@@ -0,0 +1,53 @@
+/*
+ * dnn_object_detection.h - object detection
+ *
+ * Copyright (c) 2019 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Zong Wei <wei.zong@intel.com>
+ */
+
+#ifndef _XCMA_DNN_OBJECT_DETECTION_H_
+#define _XCMA_DNN_OBJECT_DETECTION_H_
+
+#pragma once
+
+#include <string>
+#include <inference_engine.hpp>
+
+#include <xcam_std.h>
+#include "dnn_inference_engine.h"
+
+namespace XCam {
+
+class DnnObjectDetection
+ : public DnnInferenceEngine
+{
+public:
+ explicit DnnObjectDetection (DnnInferConfig& config);
+ virtual ~DnnObjectDetection ();
+
+ XCamReturn set_model_input_info (DnnInferInputOutputInfo& info);
+ XCamReturn get_model_input_info (DnnInferInputOutputInfo& info);
+
+ XCamReturn set_model_output_info (DnnInferInputOutputInfo& info);
+ XCamReturn get_model_output_info (DnnInferInputOutputInfo& info);
+
+ XCamReturn set_inference_data (std::vector<std::string> images);
+ void* get_inference_results (uint32_t idx, uint32_t& size);
+};
+
+} // namespace XCam
+
+#endif //_XCMA_DNN_OBJECT_DETECTION_H_
diff --git a/tests/Makefile.am b/tests/Makefile.am
index d16c72e..eb27a9e 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -241,7 +241,8 @@ TEST_DNN_INFERENCE_LA = $(top_builddir)/modules/dnn/libxcam_dnn.la
test_dnn_inference_SOURCES = test-dnn-inference.cpp
test_dnn_inference_CXXFLAGS = \
$(TEST_BASE_CXXFLAGS) \
- $(OPENVINO_IE_CXXFLAGS) \
+ -I$(OPENVINO_IE_INC_PATH)/include \
+ -I$(OPENVINO_IE_INC_PATH)/src/extension \
$(NULL)
test_dnn_inference_LDADD = \
diff --git a/tests/test-dnn-inference.cpp b/tests/test-dnn-inference.cpp
index f8f7662..d07e27c 100644
--- a/tests/test-dnn-inference.cpp
+++ b/tests/test-dnn-inference.cpp
@@ -29,6 +29,7 @@
#include "test_common.h"
#include <dnn/dnn_inference_engine.h>
+#include <dnn/dnn_object_detection.h>
using namespace XCam;
using namespace InferenceEngine;
@@ -346,7 +347,7 @@ int main (int argc, char *argv[])
XCAM_LOG_DEBUG ("2. Create inference engine");
infer_config.perf_counter = 0;
- SmartPtr<DnnInferenceEngine> infer_engine = new DnnInferenceEngine (infer_config);
+ SmartPtr<DnnInferenceEngine> infer_engine = new DnnObjectDetection (infer_config);
DnnInferenceEngineInfo infer_info;
CHECK (