#define benchmark
DCSP_CORE::DCSP_CORE() {
}


DCSP_CORE::~DCSP_CORE() {
    delete session;
}
#ifdef USE_CUDA
namespace Ort
{
    // Map the CUDA `half` type onto ONNX Runtime's FP16 element type so that
    // Ort::Value::CreateTensor<half>() can build FP16 input tensors.
    template<>
    struct TypeToTensorType<half> { static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16; };
}
#endif
// Convert an 8-bit HWC image into a planar CHW blob scaled to [0, 1];
// element (c, h, w) is stored at index c * H * W + h * W + w.
template<typename T>
char* BlobFromImage(cv::Mat& iImg, T& iBlob) {
    int channels = iImg.channels();
    int imgHeight = iImg.rows;
    int imgWidth = iImg.cols;
    for (int c = 0; c < channels; c++) {
        for (int h = 0; h < imgHeight; h++) {
            for (int w = 0; w < imgWidth; w++) {
                iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = typename std::remove_pointer<T>::type(
                        (iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);
            }
        }
    }
    return RET_OK;
}
char* PostProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg) {
    cv::Mat img = iImg.clone();
    cv::resize(iImg, oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
    if (img.channels() == 1) {
        cv::cvtColor(oImg, oImg, cv::COLOR_GRAY2BGR);
    }
    cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);
    return RET_OK;
}
char* DCSP_CORE::CreateSession(DCSP_INIT_PARAM& iParams) {
    char* Ret = RET_OK;
    std::regex pattern("[\u4e00-\u9fa5]");
    bool result = std::regex_search(iParams.ModelPath, pattern);
    if (result) {
        Ret = "[DCSP_ONNX]:Model path error. Change your model path without Chinese characters.";
        std::cout << Ret << std::endl;
        return Ret;
    }
    try {
        rectConfidenceThreshold = iParams.RectConfidenceThreshold;
        iouThreshold = iParams.iouThreshold;
        imgSize = iParams.imgSize;
        modelType = iParams.ModelType;
        env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "Yolo");
        Ort::SessionOptions sessionOption;
        if (iParams.CudaEnable) {
            cudaEnable = iParams.CudaEnable;
            OrtCUDAProviderOptions cudaOption;
            cudaOption.device_id = 0;
            sessionOption.AppendExecutionProvider_CUDA(cudaOption);
        }
        sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
        sessionOption.SetIntraOpNumThreads(iParams.IntraOpNumThreads);
        sessionOption.SetLogSeverityLevel(iParams.LogSeverityLevel);

#ifdef _WIN32
        // ONNX Runtime on Windows takes a wide-character model path, so convert it from UTF-8.
        int ModelPathSize = MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), nullptr, 0);
        wchar_t* wide_cstr = new wchar_t[ModelPathSize + 1];
        MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), wide_cstr, ModelPathSize);
        wide_cstr[ModelPathSize] = L'\0';
        const wchar_t* modelPath = wide_cstr;
#else
        const char* modelPath = iParams.ModelPath.c_str();
#endif // _WIN32

        session = new Ort::Session(env, modelPath, sessionOption);
        Ort::AllocatorWithDefaultOptions allocator;
        size_t inputNodesNum = session->GetInputCount();
        for (size_t i = 0; i < inputNodesNum; i++) {
            Ort::AllocatedStringPtr input_node_name = session->GetInputNameAllocated(i, allocator);
            char* temp_buf = new char[50];
            strcpy(temp_buf, input_node_name.get());
            inputNodeNames.push_back(temp_buf);
        }
        size_t OutputNodesNum = session->GetOutputCount();
        for (size_t i = 0; i < OutputNodesNum; i++) {
            Ort::AllocatedStringPtr output_node_name = session->GetOutputNameAllocated(i, allocator);
            char* temp_buf = new char[10];
            strcpy(temp_buf, output_node_name.get());
            outputNodeNames.push_back(temp_buf);
        }
        options = Ort::RunOptions{ nullptr };
        WarmUpSession();
        return RET_OK;
    }
    catch (const std::exception& e) {
        const char* str1 = "[DCSP_ONNX]:";
        const char* str2 = e.what();
        std::string result = std::string(str1) + std::string(str2);
        char* merged = new char[result.length() + 1];
        std::strcpy(merged, result.c_str());
        std::cout << merged << std::endl;
        delete[] merged;
        return "[DCSP_ONNX]:Create session failed.";
    }
}
char* DCSP_CORE::RunSession(cv::Mat& iImg, std::vector<DCSP_RESULT>& oResult) {
#ifdef benchmark
    clock_t starttime_1 = clock();
#endif // benchmark

    char* Ret = RET_OK;
    cv::Mat processedImg;
    PostProcess(iImg, imgSize, processedImg);
    if (modelType < 4) {
        float* blob = new float[processedImg.total() * 3];
        BlobFromImage(processedImg, blob);
        std::vector<int64_t> inputNodeDims = { 1, 3, imgSize.at(0), imgSize.at(1) };
        TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
    } else {
#ifdef USE_CUDA
        half* blob = new half[processedImg.total() * 3];
        BlobFromImage(processedImg, blob);
        std::vector<int64_t> inputNodeDims = { 1, 3, imgSize.at(0), imgSize.at(1) };
        TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
#endif
    }

    return Ret;
}
template<typename N>
char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
                               std::vector<DCSP_RESULT>& oResult) {
    Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(
            Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
            inputNodeDims.data(), inputNodeDims.size());
#ifdef benchmark
    clock_t starttime_2 = clock();
#endif // benchmark
    auto outputTensor = session->Run(options, inputNodeNames.data(), &inputTensor, 1, outputNodeNames.data(),
                                     outputNodeNames.size());
#ifdef benchmark
    clock_t starttime_3 = clock();
#endif // benchmark
    Ort::TypeInfo typeInfo = outputTensor.front().GetTypeInfo();
    auto tensor_info = typeInfo.GetTensorTypeAndShapeInfo();
    std::vector<int64_t> outputNodeDims = tensor_info.GetShape();
    auto output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
    delete[] blob;    // blob was allocated with new[], so release it with delete[]
    switch (modelType) {
        case 1://V8_ORIGIN_FP32
        case 4://V8_ORIGIN_FP16
        {
            int strideNum = outputNodeDims[2];
            int signalResultNum = outputNodeDims[1];
            std::vector<int> class_ids;
            std::vector<float> confidences;
            std::vector<cv::Rect> boxes;

            // The raw output is [1, 4 + numClasses, strideNum]; transpose it so that
            // each row holds one candidate box: [cx, cy, w, h, class scores...].
            cv::Mat rowData(signalResultNum, strideNum, CV_32F, output);
            rowData = rowData.t();

            float* data = (float*)rowData.data;

            float x_factor = iImg.cols / 640.;
            float y_factor = iImg.rows / 640.;
            for (int i = 0; i < strideNum; ++i) {
                float* classesScores = data + 4;
                cv::Mat scores(1, this->classes.size(), CV_32FC1, classesScores);
                cv::Point class_id;
                double maxClassScore;
                cv::minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
                if (maxClassScore > rectConfidenceThreshold) {
                    confidences.push_back(maxClassScore);
                    class_ids.push_back(class_id.x);

                    float x = data[0];
                    float y = data[1];
                    float w = data[2];
                    float h = data[3];

                    int left = int((x - 0.5 * w) * x_factor);
                    int top = int((y - 0.5 * h) * y_factor);

                    int width = int(w * x_factor);
                    int height = int(h * y_factor);

                    boxes.emplace_back(left, top, width, height);
                }
                data += signalResultNum;
            }

            std::vector<int> nmsResult;
            cv::dnn::NMSBoxes(boxes, confidences, rectConfidenceThreshold, iouThreshold, nmsResult);

            for (int i = 0; i < nmsResult.size(); ++i) {
                int idx = nmsResult[i];
                DCSP_RESULT result;
                result.classId = class_ids[idx];
                result.confidence = confidences[idx];
                result.box = boxes[idx];
                oResult.push_back(result);
            }

#ifdef benchmark
            clock_t starttime_4 = clock();
            double pre_process_time = (double)(starttime_2 - starttime_1) / CLOCKS_PER_SEC * 1000;
            double process_time = (double)(starttime_3 - starttime_2) / CLOCKS_PER_SEC * 1000;
            double post_process_time = (double)(starttime_4 - starttime_3) / CLOCKS_PER_SEC * 1000;
            if (cudaEnable) {
                std::cout << "[DCSP_ONNX(CUDA)]: " << pre_process_time << "ms pre-process, " << process_time
                          << "ms inference, " << post_process_time << "ms post-process." << std::endl;
            } else {
                std::cout << "[DCSP_ONNX(CPU)]: " << pre_process_time << "ms pre-process, " << process_time
                          << "ms inference, " << post_process_time << "ms post-process." << std::endl;
            }
#endif // benchmark

            break;
        }
    }
    return RET_OK;
}
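// Worked example of the box decode above (illustrative numbers, not taken from the source):
// for a 1920x1080 input, x_factor = 1920 / 640 = 3.0 and y_factor = 1080 / 640 = 1.6875, so a
// candidate with center (cx, cy) = (320, 320) and size (w, h) = (64, 64) in 640x640 network
// space maps back to the original image as
//   left   = (320 - 0.5 * 64) * 3.0    = 864
//   top    = (320 - 0.5 * 64) * 1.6875 = 486
//   width  = 64 * 3.0    = 192
//   height = 64 * 1.6875 = 108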
char* DCSP_CORE::WarmUpSession() {
    clock_t starttime_1 = clock();
    cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);
    cv::Mat processedImg;
    PostProcess(iImg, imgSize, processedImg);
    if (modelType < 4) {
        float* blob = new float[iImg.total() * 3];
        BlobFromImage(processedImg, blob);
        std::vector<int64_t> YOLO_input_node_dims = { 1, 3, imgSize.at(0), imgSize.at(1) };
        Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
                Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
                YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
        auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(),
                                           outputNodeNames.size());
        delete[] blob;
        clock_t starttime_4 = clock();
        double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
        if (cudaEnable) {
            std::cout << "[DCSP_ONNX(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
        }
    } else {
#ifdef USE_CUDA
        half* blob = new half[iImg.total() * 3];
        BlobFromImage(processedImg, blob);
        std::vector<int64_t> YOLO_input_node_dims = { 1, 3, imgSize.at(0), imgSize.at(1) };
        Ort::Value input_tensor = Ort::Value::CreateTensor<half>(
                Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
                YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
        auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(),
                                           outputNodeNames.size());
        delete[] blob;
        clock_t starttime_4 = clock();
        double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
        if (cudaEnable) {
            std::cout << "[DCSP_ONNX(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
        }
#endif
    }
    return RET_OK;
}
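// --------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original file): one way a caller
// might drive DCSP_CORE, assuming DCSP_INIT_PARAM and DCSP_RESULT are declared
// in the accompanying header with the fields used throughout this file. The
// model file name, image path, and threshold values below are placeholders.
//
//     DCSP_CORE detector;
//     DCSP_INIT_PARAM params;
//     params.ModelPath = "yolov8n.onnx";
//     params.ModelType = 1;                     // V8_ORIGIN_FP32 path (modelType < 4)
//     params.imgSize = { 640, 640 };
//     params.RectConfidenceThreshold = 0.45f;
//     params.iouThreshold = 0.5f;
//     params.CudaEnable = false;
//     detector.CreateSession(params);
//
//     cv::Mat frame = cv::imread("image.jpg");
//     std::vector<DCSP_RESULT> results;
//     detector.RunSession(frame, results);
//     for (const DCSP_RESULT& r : results)
//         cv::rectangle(frame, r.box, cv::Scalar(0, 255, 0), 2);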