From 4d5e7db268a4f816e24449e8ad011e35890f0c7e Mon Sep 17 00:00:00 2001
From: Qin Su <qsu@ti.com>
Date: Fri, 22 Feb 2019 13:39:09 -0500
Subject: [PATCH] armnn mobilenet test example

Add a standalone MobileNet v1 classification example for Arm NN. The
example loads a model through the Caffe, TensorFlow, TFLite or ONNX
parser, takes a still image, a video clip (.mp4/.mov/.avi) or live
camera input via OpenCV, and prints the top five predictions together
with the measured frames per second.
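
A typical invocation (binary name taken from the example directory; the
model path and tensor names below are the defaults compiled into the
example, with 'camera_live_input' selecting live camera capture):

  ArmnnExamples -f tensorflow-binary \
      -m /usr/share/arm/armnn/models/mobilenet_v1_1.0_224_frozen.pb \
      -i input -o MobilenetV1/Predictions/Reshape_1 \
      -s "1 224 224 3" -d camera_live_input -c CpuAcc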

Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
---
 tests/ArmnnExamples/ArmnnExamples.cpp | 654 ++++++++++++++++++++++++++++++++++
 1 file changed, 654 insertions(+)
 create mode 100644 tests/ArmnnExamples/ArmnnExamples.cpp

diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp
new file mode 100644
index 0000000..53a11cc
--- /dev/null
+++ b/tests/ArmnnExamples/ArmnnExamples.cpp
@@ -0,0 +1,654 @@
+/******************************************************************************
+ * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Texas Instruments Incorporated nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <armnn/ArmNN.hpp>
+
+#include <utility>
+#include <armnn/TypesUtils.hpp>
+
+#if defined(ARMNN_CAFFE_PARSER)
+#include "armnnCaffeParser/ICaffeParser.hpp"
+#endif
+#if defined(ARMNN_TF_PARSER)
+#include "armnnTfParser/ITfParser.hpp"
+#endif
+#if defined(ARMNN_TF_LITE_PARSER)
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#endif
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
+#include "CsvReader.hpp"
+#include "../InferenceTest.hpp"
+#include <Logging.hpp>
+#include <Profiling.hpp>
+
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <boost/algorithm/string/classification.hpp>
+#include <boost/program_options.hpp>
+
+#include <iostream>
+#include <fstream>
+#include <functional>
+#include <future>
+#include <algorithm>
+#include <iterator>
+#include <vector>
+#include <chrono> // for high_resolution_clock, time_point and duration used below
+
+#include <signal.h>
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/videoio.hpp"
+#include <time.h>
+using namespace cv;
+using namespace std::chrono;
+
+#define INPUT_IMAGE 0
+#define INPUT_VIDEO 1
+#define INPUT_CAMERA 2
+
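+// Most recent input frame and the region used to anchor the on-screen overlay text.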
+Mat test_image;
+Rect rectCrop;
+
+time_point<high_resolution_clock> predictStart;
+time_point<high_resolution_clock> predictEnd;
+
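+// Mouse callback for the display window: a right click exits the example.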
+void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata)
+{
+    if (event == EVENT_RBUTTONDOWN)
+    {
+        std::cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << " ... prepare to exit!" << std::endl;
+        exit(0);
+    }
+}
+
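+// Linear interpolation between a and b with weight w (w = 0 yields a, w = 1 yields b).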
+inline float Lerpfloat(float a, float b, float w)
+{
+    return w * b + (1.f - w) * a;
+}
+
+// Decoded image: interleaved 8-bit pixel data plus its dimensions.
+struct ImageData
+{
+    unsigned int m_width;
+    unsigned int m_height;
+    unsigned int m_chnum;
+    unsigned int m_size;
+    std::vector<uint8_t> m_image;
+};
+// Load a single image: decode from file for still-image input, otherwise use the
+// frame already grabbed from the video/camera capture.
+std::unique_ptr<ImageData> loadImageData(std::string image_path, VideoCapture &cap, cv::Mat img, int input_type)
+{
+    if (input_type == INPUT_IMAGE)
+    {
+        /* use OpenCV to get the image */
+        img = cv::imread(image_path, cv::IMREAD_COLOR);
+    }
+    cv::cvtColor(img, img, cv::COLOR_BGR2RGB); // OpenCV decodes to BGR; Arm NN expects RGB.
+
+    // Copy the decoded pixels and their dimensions into the output structure.
+    std::unique_ptr<ImageData> ret(new ImageData);
+    ret->m_width = static_cast<unsigned int>(img.cols);
+    ret->m_height = static_cast<unsigned int>(img.rows);
+    ret->m_chnum = static_cast<unsigned int>(img.channels());
+    ret->m_size = static_cast<unsigned int>(img.cols * img.rows * img.channels());
+    ret->m_image.assign(img.data, img.data + ret->m_size);
+    return ret;
+}
+// Bilinearly resizes an interleaved 8-bit RGB image to outputWidth x outputHeight and
+// normalizes each channel to [0,1] floats, as expected for the network input tensor.
+std::vector<float> ResizeBilinear(std::vector<uint8_t> input,
+                                  const unsigned int inWidth,
+                                  const unsigned int inHeight,
+                                  const unsigned int inChnum,
+                                  const unsigned int outputWidth,
+                                  const unsigned int outputHeight)
+{
+    std::vector<float> out;
+    out.resize(outputWidth * outputHeight * 3);
+
+    // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
+    // image is projected into the input image to figure out the interpolants and weights. Note that this
+    // will yield different results than if projecting the centre of output texels.
+
+    const unsigned int inputWidth = inWidth;
+    const unsigned int inputHeight = inHeight;
+
+    // How much to scale pixel coordinates in the output image to get the corresponding pixel coordinates
+    // in the input image.
+    const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
+    const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
+
+    uint8_t rgb_x0y0[3];
+    uint8_t rgb_x1y0[3];
+    uint8_t rgb_x0y1[3];
+    uint8_t rgb_x1y1[3];
+    unsigned int pixelOffset00, pixelOffset10, pixelOffset01, pixelOffset11;
+    for (unsigned int y = 0; y < outputHeight; ++y)
+    {
+        // Corresponding real-valued height coordinate in input image.
+        const float iy = boost::numeric_cast<float>(y) * scaleY;
+        // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation).
+        const float fiy = floorf(iy);
+        const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy);
+
+        // Interpolation weight (range [0,1])
+        const float yw = iy - fiy;
+
+        for (unsigned int x = 0; x < outputWidth; ++x)
+        {
+            // Real-valued and discrete width coordinates in input image.
+            const float ix = boost::numeric_cast<float>(x) * scaleX;
+            const float fix = floorf(ix);
+            const unsigned int x0 = boost::numeric_cast<unsigned int>(fix);
+
+            // Interpolation weight (range [0,1]).
+            const float xw = ix - fix;
+
+            // Discrete width/height coordinates of texels below and to the right of (x0, y0).
+            const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u);
+            const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
+
+            pixelOffset00 = x0 * inChnum + y0 * inputWidth * inChnum;
+            pixelOffset10 = x1 * inChnum + y0 * inputWidth * inChnum;
+            pixelOffset01 = x0 * inChnum + y1 * inputWidth * inChnum;
+            pixelOffset11 = x1 * inChnum + y1 * inputWidth * inChnum;
+            for (unsigned int c = 0; c < 3; ++c)
+            {
+                rgb_x0y0[c] = input[pixelOffset00+c];
+                rgb_x1y0[c] = input[pixelOffset10+c];
+                rgb_x0y1[c] = input[pixelOffset01+c];
+                rgb_x1y1[c] = input[pixelOffset11+c];
+            }
+
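+            // Bilinear blend: interpolate horizontally along the top and bottom texel rows,
+            // then vertically between those two intermediate results.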
+            for (unsigned int c = 0; c < 3; ++c)
+            {
+                const float ly0 = Lerpfloat(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw);
+                const float ly1 = Lerpfloat(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw);
+                const float l = Lerpfloat(ly0, ly1, yw);
+                out[(3 * ((y * outputWidth) + x)) + c] = l / 255.0f;
+            }
+        }
+    }
+    return out;
+}
+
+namespace
+{
+
+    // Configure boost::program_options for command-line parsing and validation.
+    namespace po = boost::program_options;
+
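+    // Splits every line of the stream on whitespace, commas, semicolons or colons and
+    // parses each token with parseElementFunc; tokens that fail to parse are logged and skipped.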
+    template<typename T, typename TParseElementFunc>
+    std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc)
+    {
+        std::vector<T> result;
+        // Processes line-by-line.
+        std::string line;
+        while (std::getline(stream, line))
+        {
+            std::vector<std::string> tokens;
+            try
+            {
+                // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
+                boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on);
+            }
+            catch (const std::exception& e)
+            {
+                BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
+                continue;
+            }
+            for (const std::string& token : tokens)
+            {
+                if (!token.empty())
+                {
+                    try
+                    {
+                        result.push_back(parseElementFunc(token));
+                    }
+                    catch (const std::exception&)
+                    {
+                        BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
+                    }
+                }
+            }
+        }
+
+        return result;
+    }
+
+    template<typename T>
+    std::vector<T> ParseArray(std::istream& stream);
+
+    template<>
+    std::vector<unsigned int> ParseArray(std::istream& stream)
+    {
+        return ParseArrayImpl<unsigned int>(stream,
+            [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
+    }
+
+    void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
+    {
+        // Mark the duplicate devices as 'Undefined'.
+        for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
+        {
+            for (auto j = std::next(i); j != computeDevices.end(); ++j)
+            {
+                if (*j == *i)
+                {
+                    *j = armnn::Compute::Undefined;
+                }
+            }
+        }
+
+        // Remove 'Undefined' devices.
+        computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
+                             computeDevices.end());
+    }
+} // namespace
+
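+// Parses the model with TParser, loads it into the Arm NN runtime, then runs the
+// classification loop for number_frame frames.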
+template<typename TParser, typename TDataType>
+int MainImpl(const char* modelPath,
+             bool isModelBinary,
+             const std::vector<armnn::BackendId>& computeDevices,
+             const char* inputName,
+             const armnn::TensorShape* inputTensorShape,
+             const char* inputTensorDataFilePath,
+             const char* outputName,
+             bool enableProfiling,
+             const size_t number_frame,
+             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+    // Loads input tensor.
+    std::vector<uint8_t> input;
+    std::vector<float> input_resized;
+    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+
+    try
+    {
+        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
+        typename InferenceModel<TParser, TDataType>::Params params;
+        params.m_ModelPath = modelPath;
+        params.m_IsModelBinary = isModelBinary;
+        params.m_ComputeDevices = computeDevices;
+        params.m_InputBindings = { inputName };
+        params.m_InputShapes = { *inputTensorShape };
+        params.m_OutputBindings = { outputName };
+        params.m_SubgraphId = 0;
+        InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+
+        VideoCapture cap;
+        int input_type = INPUT_IMAGE;
+        std::string filename = inputTensorDataFilePath;
+
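+        // Select the input source: the literal name "camera_live_input" selects the camera,
+        // a .mp4/.mov/.avi extension selects a video clip, anything else is read as a still image.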
+        size_t pos = filename.rfind("camera_live_input");
+        std::string ext = filename.substr(filename.find_last_of('.') + 1);
+        if (pos != std::string::npos)
+        {
+            cap = VideoCapture(1);
+            if (!cap.isOpened())
+            {
+                // Mirror the video-clip error handling: fail cleanly if the camera cannot be opened.
+                std::cout << "Cannot open camera input" << std::endl;
+                return (-1);
+            }
+            namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL);
+            input_type = INPUT_CAMERA; //camera input
+        }
+        else if ((ext == "mp4") || (ext == "mov") || (ext == "avi"))
+        {
+            cap = VideoCapture(inputTensorDataFilePath);
+            if (!cap.isOpened())
+            {
+                std::cout << "Cannot open video input: " << inputTensorDataFilePath << std::endl;
+                return (-1);
+            }
+
+            namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL);
+            input_type = INPUT_VIDEO; //video clip input
+        }
+        if (input_type != INPUT_IMAGE)
+        {
+            // Right-clicking the display window exits the program (see imagenetCallBackFunc).
+            setMouseCallback("ARMNN MobileNet Example", imagenetCallBackFunc, nullptr);
+        }
+
+        for (unsigned int frame = 0; frame < number_frame; frame++)
+        {
+            if (input_type != INPUT_IMAGE)
+            {
+                cap.grab();
+                cap.retrieve(test_image);
+            }
+            std::unique_ptr<ImageData> inputData = loadImageData(inputTensorDataFilePath, cap, test_image, input_type);
+            input = std::move(inputData->m_image);
+            input_resized = ResizeBilinear(input, inputData->m_width, inputData->m_height, inputData->m_chnum, 224, 224);
+
+            // Set up input data container
+            std::vector<TContainer> inputDataContainer(1, std::move(input_resized));
+
+            // Set up output data container
+            std::vector<TContainer> outputDataContainers;
+            outputDataContainers.push_back(std::vector<float>(model.GetOutputSize()));
+
+            //profile start
+            predictStart = high_resolution_clock::now();
+            // Execute model
+            model.Run(inputDataContainer, outputDataContainers);
+            //profile end
+            predictEnd = high_resolution_clock::now();
+
+            double timeTakenS = duration<double>(predictEnd - predictStart).count();
+            double fps = 1.0 / timeTakenS;
+
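+            // Top-5 reporting: take the argmax of the output vector, map its index to the
+            // matching line of labels.txt, print it, then zero that entry so the next argmax
+            // yields the runner-up.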
+            //retrieve output
+            std::vector<float>& outputData = boost::get<std::vector<float>>(outputDataContainers[0]);
+            std::string predict_target_name;
+            // find the output with the highest confidence
+            int label = static_cast<int>(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end())));
+            std::ifstream file("/usr/share/arm/armnn/models/labels.txt");
+            for (int i = 0; i <= label; i++)
+            {
+                std::getline(file, predict_target_name);
+            }
+            //get the probability of the top prediction
+            float prob = 100 * outputData[label];
+            //zero out the top entry so the next argmax finds the runner-up
+            outputData[label] = 0;
+            std::cout << "Top(1) prediction is " << predict_target_name << " with confidence: " << prob << "%" << std::endl;
+            //output the next four predictions
+            for (int ii = 1; ii < 5; ii++)
+            {
+                std::string predict_target_name_n;
+                // find the output with the highest confidence
+                int label = static_cast<int>(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end())));
+                std::ifstream file("/usr/share/arm/armnn/models/labels.txt");
+                for (int i = 0; i <= label; i++)
+                {
+                    std::getline(file, predict_target_name_n);
+                }
+                //get the probability of the prediction
+                float prob = 100 * outputData[label];
+                //zero out this entry before the next iteration
+                outputData[label] = 0;
+
+                std::cout << "Top(" << (ii+1) << ") prediction is " << predict_target_name_n << " with confidence: " << prob << "%" << std::endl;
+            }
+            std::cout << "Performance (FPS): " << fps << std::endl;
+
+            if (input_type != INPUT_IMAGE)
+            {
+                // Convert back to BGR for OpenCV imshow (the network input needed RGB).
+                cv::cvtColor(test_image, test_image, cv::COLOR_RGB2BGR);
+                // Overlay the identified object name on the input image.
+                cv::putText(test_image, predict_target_name,
+                            cv::Point(rectCrop.x + 5, rectCrop.y + 20), // Coordinates
+                            cv::FONT_HERSHEY_COMPLEX_SMALL,             // Font
+                            1.0,                                        // Scale. 2.0 = 2x bigger
+                            cv::Scalar(0, 0, 255),                      // Color
+                            1,                                          // Thickness
+                            8);                                         // Line type
+
+                // Overlay the performance in FPS on the input image.
+                std::string fps_string = "Performance (FPS): " + boost::lexical_cast<std::string>(fps);
+                cv::putText(test_image, fps_string,
+                            cv::Point(rectCrop.x + 5, rectCrop.y + 40), // Coordinates
+                            cv::FONT_HERSHEY_COMPLEX_SMALL,             // Font
+                            1.0,                                        // Scale. 2.0 = 2x bigger
+                            cv::Scalar(0, 0, 255),                      // Color
+                            1,                                          // Thickness
+                            8);                                         // Line type
+
+                cv::imshow("ARMNN MobileNet Example", test_image);
+                waitKey(2);
+            }
+        }
+    }
+    catch (armnn::Exception const& e)
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what();
+        return EXIT_FAILURE;
+    }
+    return EXIT_SUCCESS;
+}
+
+// Determines the model format, parses the requested input tensor shape, and
+// dispatches to MainImpl with the matching parser type.
+int RunTest(const std::string& modelFormat,
+            const std::string& inputTensorShapeStr,
+            const std::vector<armnn::BackendId>& computeDevice,
+            const std::string& modelPath,
+            const std::string& inputName,
+            const std::string& inputTensorDataFilePath,
+            const std::string& outputName,
+            bool enableProfiling,
+            const size_t numberOfFrames, // forwarded to MainImpl as its number_frame argument
+            const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+    // Parse model binary flag from the model-format string we got from the command-line
+    bool isModelBinary;
+    if (modelFormat.find("bin") != std::string::npos)
+    {
+        isModelBinary = true;
+    }
+    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
+    {
+        isModelBinary = false;
+    }
+    else
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
+        return EXIT_FAILURE;
+    }
+
+    // Parse input tensor shape from the string we got from the command-line.
+    std::unique_ptr<armnn::TensorShape> inputTensorShape;
+    if (!inputTensorShapeStr.empty())
+    {
+        std::stringstream ss(inputTensorShapeStr);
+        std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
+        try
+        {
+            // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
+            inputTensorShape = std::make_unique<armnn::TensorShape>(dims.size(), dims.data());
+        }
+        catch (const armnn::InvalidArgumentException& e)
+        {
+            BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
+            return EXIT_FAILURE;
+        }
+    }
+    // Forward to implementation based on the parser type
+    if (modelFormat.find("caffe") != std::string::npos)
+    {
+#if defined(ARMNN_CAFFE_PARSER)
+        return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                               inputName.c_str(), inputTensorShape.get(),
+                                                               inputTensorDataFilePath.c_str(), outputName.c_str(),
+                                                               enableProfiling, numberOfFrames, runtime);
+#else
+        BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
+        return EXIT_FAILURE;
+#endif
+    }
+    else if (modelFormat.find("onnx") != std::string::npos)
+    {
+#if defined(ARMNN_ONNX_PARSER)
+        return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                             inputName.c_str(), inputTensorShape.get(),
+                                                             inputTensorDataFilePath.c_str(), outputName.c_str(),
+                                                             enableProfiling, numberOfFrames, runtime);
+#else
+        BOOST_LOG_TRIVIAL(fatal) << "Not built with ONNX parser support.";
+        return EXIT_FAILURE;
+#endif
+    }
+    else if (modelFormat.find("tensorflow") != std::string::npos)
+    {
+#if defined(ARMNN_TF_PARSER)
+        return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                         inputName.c_str(), inputTensorShape.get(),
+                                                         inputTensorDataFilePath.c_str(), outputName.c_str(),
+                                                         enableProfiling, numberOfFrames, runtime);
+#else
+        BOOST_LOG_TRIVIAL(fatal) << "Not built with TensorFlow parser support.";
+        return EXIT_FAILURE;
+#endif
+    }
+    else if (modelFormat.find("tflite") != std::string::npos)
+    {
+#if defined(ARMNN_TF_LITE_PARSER)
+        if (!isModelBinary)
+        {
+            BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat
+                << "'. Only 'binary' format supported for tflite files";
+            return EXIT_FAILURE;
+        }
+        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                                 inputName.c_str(), inputTensorShape.get(),
+                                                                 inputTensorDataFilePath.c_str(), outputName.c_str(),
+                                                                 enableProfiling, numberOfFrames, runtime);
+#else
+        BOOST_LOG_TRIVIAL(fatal) << "Not built with TfLite parser support.";
+        return EXIT_FAILURE;
+#endif
+    }
+    else
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
+            "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
+        return EXIT_FAILURE;
+    }
+}
+
+int main(int argc, const char* argv[])
+{
+    // Configures logging for both the ARMNN library and this test program.
+#ifdef NDEBUG
+    armnn::LogSeverity level = armnn::LogSeverity::Info;
+#else
+    armnn::LogSeverity level = armnn::LogSeverity::Debug;
+#endif
+    armnn::ConfigureLogging(true, true, level);
+    armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level);
+
+    std::string testCasesFile;
+
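+    // Defaults used when the example is launched with no arguments: MobileNet v1
+    // (frozen TensorFlow graph) on CpuAcc, fed from a bundled test video clip.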
+    std::string modelFormat = "tensorflow-binary";
+    std::string modelPath = "/usr/share/arm/armnn/models/mobilenet_v1_1.0_224_frozen.pb";
+    std::string inputName = "input";
+    std::string inputTensorShapeStr = "1 224 224 3";
+    std::string inputTensorDataFilePath = "/usr/share/arm/armnn/testvecs/test2.mp4";
+    std::string outputName = "MobilenetV1/Predictions/Reshape_1";
+    std::vector<armnn::BackendId> computeDevices = {armnn::Compute::CpuAcc};
+    // Catch ctrl-c (SIGINT) and termination signals to ensure a clean exit
+    signal(SIGINT, exit);
+    signal(SIGABRT, exit);
+    signal(SIGTERM, exit);
+
+    if (argc == 1)
+    {
+        return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
+                       modelPath, inputName, inputTensorDataFilePath, outputName, false, 1000);
+    }
+    else
+    {
+        size_t numberOfFrames = 1;
+        po::options_description desc("Options");
+        try
+        {
+            desc.add_options()
+                ("help", "Display usage information")
+                ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
+                 "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
+                 "as they are expected to be defined in the file for each test in particular.")
+                ("concurrent,n", po::bool_switch()->default_value(false),
+                 "Whether or not the test cases should be executed in parallel")
+                ("model-format,f", po::value(&modelFormat),
+                 "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
+                ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
+                 " .tflite, .onnx")
+                ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
+                 "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
+                ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
+                ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
+                 "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
+                 "This parameter is optional, depending on the network.")
+                ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
+                 "Input test file name. It can be an image/video clip file name, or use 'camera_live_input' to select camera input.")
+                ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
+                ("event-based-profiling,e", po::bool_switch()->default_value(false),
+                 "Enables built in profiler. If unset, defaults to off.")
+                ("number_frame", po::value<size_t>(&numberOfFrames)->default_value(1), "Number of frames to process.");
+        }
+        catch (const std::exception& e)
+        {
+            // Coverity points out that default_value(...) can throw a bad_lexical_cast,
+            // and that desc.add_options() can throw boost::io::too_few_args.
+            // They really won't in any of these cases.
+            BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+            BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
+            return EXIT_FAILURE;
+        }
+
+        // Parses the command-line.
+        po::variables_map vm;
+        try
+        {
+            po::store(po::parse_command_line(argc, argv, desc), vm);
+            po::notify(vm);
+        }
+        catch (const po::error& e)
+        {
+            std::cerr << e.what() << std::endl << std::endl;
+            std::cerr << desc << std::endl;
+            return EXIT_FAILURE;
+        }
+
+        if (vm.count("help"))
+        {
+            std::cout << desc << std::endl;
+            return EXIT_SUCCESS;
+        }
+
+        // Run single test
+        // Get the preferred order of compute devices, keeping the CpuAcc default
+        // when --compute was not supplied (an unconditional .as<>() would throw).
+        if (vm.count("compute"))
+        {
+            computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
+        }
+        bool enableProfiling = vm["event-based-profiling"].as<bool>();
+
+        // Remove duplicates from the list of compute devices.
+        RemoveDuplicateDevices(computeDevices);
+
+        return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
+                       modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, numberOfFrames);
+    }
+}
+
--
1.9.1
