DriveWorks SDK Reference
5.6.215 Release
For Test and Development only

    DNN.h
    Go to the documentation of this file.
    1
    2//
    3// Notice
4// ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS." NVIDIA MAKES
    5// NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
    6// THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
    7// MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
    8//
    9// NVIDIA CORPORATION & AFFILIATES assumes no responsibility for the consequences of use of such
    10// information or for any infringement of patents or other rights of third parties that may
    11// result from its use. No license is granted by implication or otherwise under any patent
    12// or patent rights of NVIDIA CORPORATION & AFFILIATES. No third party distribution is allowed unless
    13// expressly authorized by NVIDIA. Details are subject to change without notice.
    14// This code supersedes and replaces all information previously supplied.
    15// NVIDIA CORPORATION & AFFILIATES products are not authorized for use as critical
    16// components in life support devices or systems without express written approval of
    17// NVIDIA CORPORATION & AFFILIATES.
    18//
    19// SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
    20// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
    21//
    22// NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
    23// property and proprietary rights in and to this material, related
    24// documentation and any modifications thereto. Any use, reproduction,
    25// disclosure or distribution of this material and related documentation
    26// without an express license agreement from NVIDIA CORPORATION or
    27// its affiliates is strictly prohibited.
    28//
    30
    46#ifndef DW_DNN_H_
    47#define DW_DNN_H_
    48
    50#include <dw/core/Config.h>
    51#include <dw/core/base/Status.h>
    54#include <driver_types.h>
    55
    56#ifdef __cplusplus
    57extern "C" {
    58#endif
    59
    62typedef struct dwDNNObject* dwDNNHandle_t;
    63typedef struct dwDNNObject const* dwConstDNNHandle_t;
    64
    67typedef struct
    68{
69 dwDataConditionerParams dataConditionerParams;
70} dwDNNMetaData;
71
    74typedef struct
    75{
    76 const char8_t* pluginLibraryPath;
    77 const char8_t* layerName;
78} dwDNNCustomLayer;
79
    82typedef struct
    83{
84 const dwDNNCustomLayer* customLayers;
85 size_t numCustomLayers;
86} dwDNNPluginConfiguration;
87
    125dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t* const network, const char8_t* const modelFilename,
    126 const dwDNNPluginConfiguration* const pluginConfiguration,
    127 dwProcessorType const processorType, dwContextHandle_t const context);
    128
    147dwStatus dwDNN_initializeTensorRTFromFileWithEngineId(dwDNNHandle_t* const network, const char8_t* const modelFilename,
    148 const dwDNNPluginConfiguration* const pluginConfiguration,
    149 dwProcessorType const processorType, uint32_t engineId,
    150 dwContextHandle_t const context);
    151
173dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t* const network,
174 const char8_t* const modelContent,
    175 uint32_t const modelContentSize,
    176 const dwDNNPluginConfiguration* const pluginConfiguration,
    177 dwProcessorType const processorType, dwContextHandle_t const context);
    178
198dwStatus dwDNN_initializeTensorRTFromMemoryWithEngineId(dwDNNHandle_t* const network,
199 const char8_t* const modelContent,
    200 uint32_t const modelContentSize,
    201 const dwDNNPluginConfiguration* const pluginConfiguration,
    202 dwProcessorType const processorType, uint32_t engineId,
    203 dwContextHandle_t const context);
213dwStatus dwDNN_reset(dwDNNHandle_t const network);
214
224dwStatus dwDNN_release(dwDNNHandle_t const network);
225
    251dwStatus dwDNN_inferSIO(float32_t* const dOutput, const float32_t* const dInput, uint32_t const batchsize,
    252 dwDNNHandle_t const network);
    253
    277dwStatus dwDNN_inferRaw(float32_t* const* const dOutput, const float32_t* const* const dInput,
    278 uint32_t const batchsize, dwDNNHandle_t const network);
    279
    292dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network);
    293
    304dwStatus dwDNN_getCUDAStream(cudaStream_t* const stream, dwDNNHandle_t const network);
    305
    318dwStatus dwDNN_getInputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);
    319
    332dwStatus dwDNN_getOutputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);
    333
    346dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);
    347
    360dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);
    361
    372dwStatus dwDNN_getInputBlobCount(uint32_t* const count, dwDNNHandle_t const network);
    373
    383dwStatus dwDNN_getOutputBlobCount(uint32_t* const count, dwDNNHandle_t const network);
    384
    397dwStatus dwDNN_getInputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);
    398
    411dwStatus dwDNN_getOutputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);
    412
424dwStatus dwDNN_getMetaData(dwDNNMetaData* const metaData, dwDNNHandle_t const network);
425
    438dwStatus dwDNN_infer(dwDNNTensorHandle_t* const outputTensors, uint32_t const outputTensorCount,
    439 dwConstDNNTensorHandle_t* const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network);
    440
    441#ifdef __cplusplus
    442}
    443#endif
    445#endif // DW_DNN_H_
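
    Example: the snippet below is a minimal usage sketch assembled only from the declarations in this header. It assumes the include path <dw/dnn/DNN.h>, an already-initialized dwContextHandle_t and cudaStream_t supplied by the caller, a hypothetical serialized TensorRT model file ("model.bin"), the dwBlobSize member names (batchsize, channels, height, width) and the DW_PROCESSOR_TYPE_GPU enumerator from other DriveWorks headers, and that a NULL plugin configuration is acceptable when the model has no custom layers.

    #include <dw/dnn/DNN.h>   // assumed install path of this header
    #include <cuda_runtime.h>
    #include <stddef.h>

    void runModelOnce(dwContextHandle_t ctx, cudaStream_t stream)
    {
        dwDNNHandle_t dnn = DW_NULL_HANDLE;

        // Load a serialized TensorRT model; a NULL plugin configuration is assumed
        // to be valid when the model contains no custom layers.
        if (dwDNN_initializeTensorRTFromFile(&dnn, "model.bin", NULL,
                                             DW_PROCESSOR_TYPE_GPU, ctx) != DW_SUCCESS)
        {
            return;
        }

        // Query the first input/output blob sizes to dimension device buffers.
        dwBlobSize inSize;
        dwBlobSize outSize;
        dwDNN_getInputSize(&inSize, 0U, dnn);
        dwDNN_getOutputSize(&outSize, 0U, dnn);

        size_t inCount  = (size_t)inSize.batchsize * inSize.channels * inSize.height * inSize.width;
        size_t outCount = (size_t)outSize.batchsize * outSize.channels * outSize.height * outSize.width;

        float32_t* dInput  = NULL;
        float32_t* dOutput = NULL;
        cudaMalloc((void**)&dInput, inCount * sizeof(float32_t));
        cudaMalloc((void**)&dOutput, outCount * sizeof(float32_t));
        // Input buffer is left unfilled here; in practice it would be populated
        // (e.g. by the data conditioner) before inference.

        // Single-input/single-output inference on the caller's CUDA stream.
        dwDNN_setCUDAStream(stream, dnn);
        dwDNN_inferSIO(dOutput, dInput, inSize.batchsize, dnn);
        cudaStreamSynchronize(stream);

        cudaFree(dInput);
        cudaFree(dOutput);
        dwDNN_release(dnn);
    }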
    NVIDIA DriveWorks API: Data Conditioner Methods
    NVIDIA DriveWorks API: Core Status Methods
    NVIDIA DriveWorks API: Core Methods
    struct dwContextObject * dwContextHandle_t
    Context handle.
    Definition: Context.h:79
    #define DW_API_PUBLIC
    Definition: Exports.h:54
    dwStatus
    Status definition.
    Definition: Status.h:170
    dwProcessorType
    Processor type definitions.
    Definition: Types.h:168
    float float32_t
    Specifies POD types.
    Definition: Types.h:70
    Holds blob dimensions.
    Definition: Types.h:662
    const char8_t * pluginLibraryPath
    Path to a plugin shared object.
    Definition: DNN.h:76
    size_t numCustomLayers
    Number of custom layers.
    Definition: DNN.h:85
    const dwDNNCustomLayer * customLayers
    Array of custom layers.
    Definition: DNN.h:84
    dwDataConditionerParams dataConditionerParams
    DataConditioner parameters for running this network.
    Definition: DNN.h:69
    const char8_t * layerName
    Name of the custom layer.
    Definition: DNN.h:77
    struct dwDNNObject * dwDNNHandle_t
    Handles representing Deep Neural Network interface.
    Definition: DNN.h:62
    DW_API_PUBLIC dwStatus dwDNN_infer(dwDNNTensorHandle_t *const outputTensors, uint32_t const outputTensorCount, dwConstDNNTensorHandle_t *const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network)
    Runs inference pipeline on the given input.
    DW_API_PUBLIC dwStatus dwDNN_getInputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
    Gets the input blob count.
    DW_API_PUBLIC dwStatus dwDNN_getCUDAStream(cudaStream_t *const stream, dwDNNHandle_t const network)
Gets the CUDA stream used for inference operations.
    DW_API_PUBLIC dwStatus dwDNN_inferRaw(float32_t *const *const dOutput, const float32_t *const *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from all input blobs to all output blobs.
    DW_API_PUBLIC dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the output tensor properties at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t *const network, const char8_t *const modelFilename, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from file.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t *const network, const char8_t *const modelContent, uint32_t const modelContentSize, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from memory.
    DW_API_PUBLIC dwStatus dwDNN_inferSIO(float32_t *const dOutput, const float32_t *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from the first input blob to the first output blob (a shortcut for a single-input, single-output network).
    DW_API_PUBLIC dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network)
    Sets the CUDA stream for infer operations.
    DW_API_PUBLIC dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the input tensor properties at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_getOutputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the output blob size at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_getMetaData(dwDNNMetaData *const metaData, dwDNNHandle_t const network)
    Returns the metadata for the associated network model.
    DW_API_PUBLIC dwStatus dwDNN_getOutputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
    Gets the output blob count.
    DW_API_PUBLIC dwStatus dwDNN_getInputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the input blob size at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFileWithEngineId(dwDNNHandle_t *const network, const char8_t *const modelFilename, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, uint32_t engineId, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from file with DLA Engine ID.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemoryWithEngineId(dwDNNHandle_t *const network, const char8_t *const modelContent, uint32_t const modelContentSize, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, uint32_t engineId, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from memory with DLA Engine ID.
    DW_API_PUBLIC dwStatus dwDNN_getOutputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
    Gets the index of an output blob with a given blob name.
    struct dwDNNObject const * dwConstDNNHandle_t
    Definition: DNN.h:63
    DW_API_PUBLIC dwStatus dwDNN_reset(dwDNNHandle_t const network)
    Resets a given network.
    DW_API_PUBLIC dwStatus dwDNN_getInputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
    Gets the index of an input blob with a given blob name.
    DW_API_PUBLIC dwStatus dwDNN_release(dwDNNHandle_t const network)
    Releases a given network.
Specifies a custom layer provided by a plugin.
    Definition: DNN.h:75
    Specifies TensorRT model header.
    Definition: DNN.h:68
Specifies plugin configuration.
    Definition: DNN.h:83
    struct dwDNNTensorObject * dwDNNTensorHandle_t
Handles representing the DNN tensor interface.
    Definition: Tensor.h:57
    struct dwDNNTensorObject const * dwConstDNNTensorHandle_t
    Definition: Tensor.h:58
    Specifies DNNTensor properties.
    Definition: Tensor.h:99
    NVIDIA DriveWorks API: DNNTensor Structures and Methods
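
As a second hedged sketch (same assumed includes as the example above), the plugin structures dwDNNCustomLayer and dwDNNPluginConfiguration can be populated before initialization when a model needs a custom TensorRT layer. The plugin library path, layer name, model file, and the DW_PROCESSOR_TYPE_DLA_0 enumerator below are illustrative assumptions, not values defined in this header.

    static dwStatus loadModelWithPlugin(dwDNNHandle_t* dnn, dwContextHandle_t ctx)
    {
        // Hypothetical plugin library and layer name, for illustration only.
        dwDNNCustomLayer customLayer = {0};
        customLayer.pluginLibraryPath = "libmy_dnn_plugin.so";
        customLayer.layerName         = "MyCustomLayer";

        dwDNNPluginConfiguration pluginConf = {0};
        pluginConf.customLayers    = &customLayer;
        pluginConf.numCustomLayers = 1U;

        // Engine ID 0 selects the first DLA core when targeting a DLA processor
        // (DW_PROCESSOR_TYPE_DLA_0 is assumed from dwProcessorType in Types.h).
        return dwDNN_initializeTensorRTFromFileWithEngineId(dnn, "model_dla.bin", &pluginConf,
                                                             DW_PROCESSOR_TYPE_DLA_0, 0U, ctx);
    }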