
  • DriveWorks SDK Reference
    5.20.37 Release
    For Test and Development only

    DNN.h
//
// Notice
// ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS" NVIDIA MAKES
// NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
//
// NVIDIA CORPORATION & AFFILIATES assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA CORPORATION & AFFILIATES. No third party distribution is allowed unless
// expressly authorized by NVIDIA. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA CORPORATION & AFFILIATES products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA CORPORATION & AFFILIATES.
//
// SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
//
// NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
// property and proprietary rights in and to this material, related
// documentation and any modifications thereto. Any use, reproduction,
// disclosure or distribution of this material and related documentation
// without an express license agreement from NVIDIA CORPORATION or
// its affiliates is strictly prohibited.
//
#ifndef DW_DNN_H_
#define DW_DNN_H_

#include <dw/core/base/Config.h>
#include <dw/core/base/Status.h>
// Note: the full header also includes the DriveWorks DNN plugin, DNNTensor and
// DataConditioner headers (which provide dwDNNPluginConfiguration,
// dwDNNTensorHandle_t/dwDNNTensorProperties and dwDataConditionerParams);
// their include lines are elided in this listing.
#include <driver_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/// Handles representing the Deep Neural Network interface.
typedef struct dwDNNObject* dwDNNHandle_t;
typedef struct dwDNNObject const* dwConstDNNHandle_t;

/// Specifies TensorRT model header.
typedef struct
{
    /// DataConditioner parameters for running this network.
    dwDataConditionerParams dataConditionerParams;
} dwDNNMetaData;

/// Creates and initializes a TensorRT network from file.
DW_API_PUBLIC
dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t* const network, const char8_t* const modelFilename,
                                          const dwDNNPluginConfiguration* const pluginConfiguration,
                                          dwProcessorType const processorType, dwContextHandle_t const context);

/// Creates and initializes a TensorRT network from file with a DLA engine ID.
DW_API_PUBLIC
dwStatus dwDNN_initializeTensorRTFromFileWithEngineId(dwDNNHandle_t* const network, const char8_t* const modelFilename,
                                                      const dwDNNPluginConfiguration* const pluginConfiguration,
                                                      dwProcessorType const processorType, uint32_t engineId,
                                                      dwContextHandle_t const context);

/// Creates and initializes a TensorRT network from memory.
DW_API_PUBLIC
dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t* const network,
                                            const char8_t* const modelContent,
                                            uint32_t const modelContentSize,
                                            const dwDNNPluginConfiguration* const pluginConfiguration,
                                            dwProcessorType const processorType, dwContextHandle_t const context);

/// Creates and initializes a TensorRT network from memory with a DLA engine ID.
DW_API_PUBLIC
dwStatus dwDNN_initializeTensorRTFromMemoryWithEngineId(dwDNNHandle_t* const network,
                                                        const char8_t* const modelContent,
                                                        uint32_t const modelContentSize,
                                                        const dwDNNPluginConfiguration* const pluginConfiguration,
                                                        dwProcessorType const processorType, uint32_t engineId,
                                                        dwContextHandle_t const context);

/// Resets a given network.
DW_API_PUBLIC
dwStatus dwDNN_reset(dwDNNHandle_t const network);

/// Releases a given network.
DW_API_PUBLIC
dwStatus dwDNN_release(dwDNNHandle_t const network);

/// Runs a forward pass from the first input blob to the first output blob
/// (a shortcut for a single-input, single-output network).
DW_API_PUBLIC
dwStatus dwDNN_inferSIO(float32_t* const dOutput, const float32_t* const dInput, uint32_t const batchsize,
                        dwDNNHandle_t const network);

/// Runs a forward pass from all input blobs to all output blobs.
DW_API_PUBLIC
dwStatus dwDNN_inferRaw(float32_t* const* const dOutput, const float32_t* const* const dInput,
                        uint32_t const batchsize, dwDNNHandle_t const network);

/// Sets the CUDA stream for infer operations.
DW_API_PUBLIC
dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network);

/// Gets the CUDA stream used for inference.
DW_API_PUBLIC
dwStatus dwDNN_getCUDAStream(cudaStream_t* const stream, dwDNNHandle_t const network);

/// Gets the input blob size at blobIndex.
DW_API_PUBLIC
dwStatus dwDNN_getInputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);

/// Gets the output blob size at blobIndex.
DW_API_PUBLIC
dwStatus dwDNN_getOutputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);

/// Gets the input tensor properties at blobIndex.
DW_API_PUBLIC
dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);

/// Gets the output tensor properties at blobIndex.
DW_API_PUBLIC
dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);

/// Gets the input blob count.
DW_API_PUBLIC
dwStatus dwDNN_getInputBlobCount(uint32_t* const count, dwDNNHandle_t const network);

/// Gets the output blob count.
DW_API_PUBLIC
dwStatus dwDNN_getOutputBlobCount(uint32_t* const count, dwDNNHandle_t const network);

/// Gets the index of an input blob with a given blob name.
DW_API_PUBLIC
dwStatus dwDNN_getInputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);

/// Gets the index of an output blob with a given blob name.
DW_API_PUBLIC
dwStatus dwDNN_getOutputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);

/// Returns the metadata for the associated network model.
DW_API_PUBLIC
dwStatus dwDNN_getMetaData(dwDNNMetaData* const metaData, dwDNNHandle_t const network);

/// Runs the inference pipeline on the given input.
DW_API_PUBLIC
dwStatus dwDNN_infer(dwDNNTensorHandle_t* const outputTensors, uint32_t const outputTensorCount,
                     dwConstDNNTensorHandle_t* const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network);

#ifdef __cplusplus
}
#endif

#endif // DW_DNN_H_
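A minimal usage sketch of the module, not taken from the header itself: it assumes a DriveWorks context created elsewhere, a placeholder model file name, the include path dw/dnn/DNN.h, the DW_PROCESSOR_TYPE_GPU enumerator of dwProcessorType, and that passing NULL for the plugin configuration is acceptable when no custom plugins are needed. Only the dwDNN_* signatures above are taken from this file.

#include <stdio.h>
#include <stdint.h>
#include <dw/dnn/DNN.h> /* assumed install location of this header */

/* Hypothetical helper: loads a serialized TensorRT model on the GPU, prints basic
   blob information, and releases the network. The context is assumed to exist. */
static dwStatus printNetworkInfo(dwContextHandle_t ctx)
{
    dwDNNHandle_t dnn = NULL;

    /* "net.bin" is a placeholder path; NULL plugin configuration assumes no custom plugins. */
    dwStatus status = dwDNN_initializeTensorRTFromFile(&dnn, "net.bin", NULL,
                                                       DW_PROCESSOR_TYPE_GPU, ctx);
    if (status != DW_SUCCESS)
    {
        return status;
    }

    uint32_t inputCount  = 0U;
    uint32_t outputCount = 0U;
    dwDNN_getInputBlobCount(&inputCount, dnn);
    dwDNN_getOutputBlobCount(&outputCount, dnn);
    printf("input blobs: %u, output blobs: %u\n", inputCount, outputCount);

    /* Dimensions of the first input blob; field names follow dwBlobSize (TypesExtra.h). */
    dwBlobSize inputSize;
    if (dwDNN_getInputSize(&inputSize, 0U, dnn) == DW_SUCCESS)
    {
        printf("input 0: %ux%ux%ux%u\n",
               inputSize.batchsize, inputSize.channels, inputSize.height, inputSize.width);
    }

    return dwDNN_release(dnn);
}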
Referenced headers:
Context.h: NVIDIA DriveWorks API: Core Methods
Status.h: NVIDIA DriveWorks API: Core Status Methods
DNNPlugin.h: NVIDIA DriveWorks: DNN Plugin Interface
DataConditioner.h: NVIDIA DriveWorks API: Data Conditioner Methods
Tensor.h: NVIDIA DriveWorks API: DNNTensor Structures and Methods

Referenced symbols:
dwStatus
Status definition.
Definition: ErrorDefs.h:44
    float float32_t
    Specifies POD types.
    Definition: BasicTypes.h:59
    struct dwContextObject * dwContextHandle_t
    Context handle.
    Definition: Context.h:85
    #define DW_API_PUBLIC
    Definition: Exports.h:56
    dwProcessorType
    Processor type definitions.
    Definition: TypesExtra.h:126
dwBlobSize
Holds blob dimensions.
Definition: TypesExtra.h:212
dwDataConditionerParams
Struct representing parameters for DataConditioner.
    dwDataConditionerParams dataConditionerParams
    DataConditioner parameters for running this network.
    Definition: DNN.h:70
    struct dwDNNObject * dwDNNHandle_t
Handle representing the Deep Neural Network interface.
    Definition: DNN.h:63
    DW_API_PUBLIC dwStatus dwDNN_infer(dwDNNTensorHandle_t *const outputTensors, uint32_t const outputTensorCount, dwConstDNNTensorHandle_t *const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network)
    Runs inference pipeline on the given input.
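A small hypothetical sketch of the tensor-based entry point, assuming the same includes as the sketch after the file listing; the input and output tensors are assumed to have been created and filled beforehand via the DNNTensor API (not shown here).

/* Hypothetical wrapper around dwDNN_infer for a network with one input and one output. */
static dwStatus runInference(dwDNNTensorHandle_t outputTensor,
                             dwConstDNNTensorHandle_t inputTensor,
                             dwDNNHandle_t dnn)
{
    /* dwDNN_infer takes arrays of tensor handles together with their counts. */
    return dwDNN_infer(&outputTensor, 1U, &inputTensor, 1U, dnn);
}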
    DW_API_PUBLIC dwStatus dwDNN_getInputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
    Gets the input blob count.
    DW_API_PUBLIC dwStatus dwDNN_getCUDAStream(cudaStream_t *const stream, dwDNNHandle_t const network)
Gets the CUDA stream used for inference.
    DW_API_PUBLIC dwStatus dwDNN_inferRaw(float32_t *const *const dOutput, const float32_t *const *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from all input blobs to all output blobs.
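A hypothetical sketch of raw inference, assuming the same includes as the earlier sketch; the device buffers are caller-provided assumptions, sized according to the blob sizes reported by dwDNN_getInputSize/dwDNN_getOutputSize.

/* Hypothetical raw-inference helper for a network with one input and one output blob.
   dInput and dOutput are assumed to be CUDA device buffers of the right size. */
static dwStatus runInferenceRaw(float32_t* dOutput, const float32_t* dInput,
                                uint32_t batchSize, dwDNNHandle_t dnn)
{
    /* dwDNN_inferRaw expects one device pointer per input and per output blob. */
    float32_t* outputs[1]      = {dOutput};
    const float32_t* inputs[1] = {dInput};
    return dwDNN_inferRaw(outputs, inputs, batchSize, dnn);
}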
    DW_API_PUBLIC dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the output tensor properties at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t *const network, const char8_t *const modelFilename, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from file.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t *const network, const char8_t *const modelContent, uint32_t const modelContentSize, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from memory.
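A hypothetical sketch for the in-memory variant: reading the serialized model into a buffer is ordinary C I/O; the file path, the GPU processor type, the NULL plugin configuration, and the assumption that the network keeps its own copy of the model bytes are not taken from the header.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: loads a serialized TensorRT model into memory and builds the network from it. */
static dwStatus initFromMemory(dwDNNHandle_t* dnn, const char* path, dwContextHandle_t ctx)
{
    FILE* f = fopen(path, "rb");
    if (f == NULL)
    {
        return DW_FAILURE; /* generic dwStatus failure code */
    }

    fseek(f, 0, SEEK_END);
    long size = ftell(f);
    fseek(f, 0, SEEK_SET);

    char* content = (char*)malloc((size_t)size);
    if (content == NULL)
    {
        fclose(f);
        return DW_FAILURE;
    }

    size_t bytesRead = fread(content, 1U, (size_t)size, f);
    fclose(f);
    if (bytesRead != (size_t)size)
    {
        free(content);
        return DW_FAILURE;
    }

    dwStatus status = dwDNN_initializeTensorRTFromMemory(dnn, content, (uint32_t)size,
                                                         NULL /* no plugins, an assumption */,
                                                         DW_PROCESSOR_TYPE_GPU, ctx);
    free(content); /* assumption: the network keeps its own copy of the serialized model */
    return status;
}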
    DW_API_PUBLIC dwStatus dwDNN_inferSIO(float32_t *const dOutput, const float32_t *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from the first input blob to the first output blob (a shortcut for a single-input, single-output network).
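A hypothetical sketch for single-input, single-output models, assuming the same includes as the earlier sketch and the dwBlobSize field names from TypesExtra.h; the device buffers are caller-provided assumptions.

/* Hypothetical single-input/single-output inference helper: verifies the requested
   batch size against the first input blob before the forward pass. */
static dwStatus runInferenceSIO(float32_t* dOutput, const float32_t* dInput,
                                uint32_t batchSize, dwDNNHandle_t dnn)
{
    dwBlobSize inputSize;
    dwStatus status = dwDNN_getInputSize(&inputSize, 0U, dnn);
    if (status != DW_SUCCESS)
    {
        return status;
    }
    if (batchSize > inputSize.batchsize) /* field name follows dwBlobSize (TypesExtra.h) */
    {
        return DW_INVALID_ARGUMENT;
    }
    return dwDNN_inferSIO(dOutput, dInput, batchSize, dnn);
}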
    DW_API_PUBLIC dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network)
    Sets the CUDA stream for infer operations.
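A hypothetical sketch of associating a CUDA stream with the network, assuming the same includes as the earlier sketch plus the CUDA runtime header; the stream is created by the caller, and cudaStreamSynchronize is used to wait for results before reading the output on the host.

#include <cuda_runtime_api.h> /* for cudaStreamSynchronize */

/* Hypothetical helper: enqueue inference on a caller-provided stream and wait for it. */
static dwStatus inferOnStream(float32_t* dOutput, const float32_t* dInput,
                              uint32_t batchSize, cudaStream_t stream, dwDNNHandle_t dnn)
{
    dwStatus status = dwDNN_setCUDAStream(stream, dnn);
    if (status != DW_SUCCESS)
    {
        return status;
    }

    status = dwDNN_inferSIO(dOutput, dInput, batchSize, dnn);
    if (status != DW_SUCCESS)
    {
        return status;
    }

    /* If inference is enqueued asynchronously on the stream, synchronize before
       reading dOutput on the host. */
    cudaStream_t activeStream = NULL;
    dwDNN_getCUDAStream(&activeStream, dnn);
    cudaStreamSynchronize(activeStream);
    return DW_SUCCESS;
}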
    DW_API_PUBLIC dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the input tensor properties at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_getOutputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the output blob size at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_getMetaData(dwDNNMetaData *const metaData, dwDNNHandle_t const network)
    Returns the metadata for the associated network model.
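A hypothetical sketch showing how the metadata could be retrieved, assuming the same includes as the earlier sketch; the dataConditionerParams member comes from the dwDNNMetaData definition above, and forwarding it to a DataConditioner is outside this header.

/* Hypothetical helper: fetches the model metadata so its DataConditioner parameters
   can be used to configure pre-processing elsewhere. */
static dwStatus queryMetaData(dwDataConditionerParams* params, dwDNNHandle_t dnn)
{
    dwDNNMetaData metaData;
    dwStatus status = dwDNN_getMetaData(&metaData, dnn);
    if (status == DW_SUCCESS)
    {
        *params = metaData.dataConditionerParams;
    }
    return status;
}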
    DW_API_PUBLIC dwStatus dwDNN_getOutputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
    Gets the output blob count.
    DW_API_PUBLIC dwStatus dwDNN_getInputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
    Gets the input blob size at blobIndex.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFileWithEngineId(dwDNNHandle_t *const network, const char8_t *const modelFilename, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, uint32_t engineId, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from file with DLA Engine ID.
    DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemoryWithEngineId(dwDNNHandle_t *const network, const char8_t *const modelContent, uint32_t const modelContentSize, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, uint32_t engineId, dwContextHandle_t const context)
    Creates and initializes a TensorRT Network from memory with DLA Engine ID.
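A hypothetical sketch for the engine-ID variants, assuming the same includes as the earlier sketch: the caller supplies the processor type that selects the DLA, since the exact dwProcessorType enumerator is not shown in this header, and engineId picks the DLA instance.

/* Hypothetical helper: initializes a network on a specific DLA engine. */
static dwStatus initOnDla(dwDNNHandle_t* dnn, const char* modelPath,
                          dwProcessorType dlaProcessor, uint32_t engineId,
                          dwContextHandle_t ctx)
{
    return dwDNN_initializeTensorRTFromFileWithEngineId(dnn, modelPath,
                                                        NULL /* no plugins, an assumption */,
                                                        dlaProcessor, engineId, ctx);
}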
    DW_API_PUBLIC dwStatus dwDNN_getOutputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
    Gets the index of an output blob with a given blob name.
    struct dwDNNObject const * dwConstDNNHandle_t
    Definition: DNN.h:64
    DW_API_PUBLIC dwStatus dwDNN_reset(dwDNNHandle_t const network)
    Resets a given network.
    DW_API_PUBLIC dwStatus dwDNN_getInputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
    Gets the index of an input blob with a given blob name.
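A hypothetical sketch for name-based lookup, assuming the same includes as the earlier sketch: the blob name is supplied by the caller, and the resolved index is then used with the size and tensor-property getters above.

/* Hypothetical helper: resolves an input blob by name and reads its dimensions
   and tensor properties. */
static dwStatus describeInput(const char* blobName, dwDNNHandle_t dnn)
{
    uint32_t index = 0U;
    dwStatus status = dwDNN_getInputIndex(&index, blobName, dnn);
    if (status != DW_SUCCESS)
    {
        return status;
    }

    dwBlobSize size;
    dwDNNTensorProperties props;
    status = dwDNN_getInputSize(&size, index, dnn);
    if (status != DW_SUCCESS)
    {
        return status;
    }
    return dwDNN_getInputTensorProperties(&props, index, dnn);
}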
    DW_API_PUBLIC dwStatus dwDNN_release(dwDNNHandle_t const network)
    Releases a given network.
dwDNNMetaData
Specifies TensorRT model header.
    Definition: DNN.h:69
dwDNNPluginConfiguration
Specifies plugin configuration.
    Definition: DNNPlugin.h:66
    struct dwDNNTensorObject * dwDNNTensorHandle_t
Handle representing a DNNTensor object.
    Definition: Tensor.h:57
    struct dwDNNTensorObject const * dwConstDNNTensorHandle_t
    Definition: Tensor.h:58
    Specifies DNNTensor properties.
    Definition: Tensor.h:97