增加MNN模型支持
This commit is contained in:
@ -6,8 +6,7 @@
|
||||
#include <napi.h>
|
||||
|
||||
#define NODE_INIT_OBJECT(name, function) \
|
||||
do \
|
||||
{ \
|
||||
do { \
|
||||
auto obj = Napi::Object::New(env); \
|
||||
function(env, obj); \
|
||||
exports.Set(Napi::String::New(env, #name), obj); \
|
||||
@ -21,4 +20,13 @@ inline uint64_t __node_ptr_of__(Napi::Value value)
|
||||
|
||||
#define NODE_PTR_OF(type, value) (reinterpret_cast<type *>(__node_ptr_of__(value)))
|
||||
|
||||
|
||||
// Returns a pointer to the first element viewed by a JS TypedArray and
// reports the view's byte length through `bytes`. The pointer aliases the
// underlying ArrayBuffer (honouring the view's byte offset); no copy is
// made, so it is only valid while the ArrayBuffer stays alive.
// NOTE(review): `val` is cast with As<TypedArray>() unchecked — callers must
// ensure the value really is a TypedArray.
inline void *dataFromTypedArray(const Napi::Value &val, size_t &bytes)
{
    const auto view = val.As<Napi::TypedArray>();
    bytes = view.ByteLength();
    auto *base = static_cast<uint8_t *>(view.ArrayBuffer().Data());
    return base + view.ByteOffset();
}
|
||||
|
||||
#endif
|
||||
|
18
cxx/common/tensor.h
Normal file
18
cxx/common/tensor.h
Normal file
@ -0,0 +1,18 @@
|
||||
#ifndef __COMMON_TENSOR_H__
#define __COMMON_TENSOR_H__

// Backend-agnostic tensor element types shared by the MNN and ONNXRuntime
// bindings. The numeric codes cross the N-API boundary (JavaScript passes
// and receives them via static_cast<int>), so they are pinned explicitly:
// reordering or inserting enumerators must never change an existing code.
enum class TensorDataType {
    Unknown = 0,
    Float32 = 1,
    Float64 = 2,
    Int32 = 3,
    Uint32 = 4,
    Int16 = 5,
    Uint16 = 6,
    Int8 = 7,
    Uint8 = 8,
    Int64 = 9,
    Uint64 = 10,
};

#endif
|
@ -134,7 +134,7 @@ void InstallOpenCVAPI(Env env, Object exports)
|
||||
CVMat::Init(env, exports);
|
||||
}
|
||||
|
||||
#ifdef USE_OPENCV
|
||||
#if defined(USE_OPENCV) && not defined(BUILD_MAIN_WORD)
|
||||
static Object Init(Env env, Object exports)
|
||||
{
|
||||
InstallOpenCVAPI(env, exports);
|
||||
|
233
cxx/mnn/node.cc
Normal file
233
cxx/mnn/node.cc
Normal file
@ -0,0 +1,233 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <cstring>
|
||||
#include <MNN/Interpreter.hpp>
|
||||
#include <MNN/ImageProcess.hpp>
|
||||
#include "common/tensor.h"
|
||||
#include "node.h"
|
||||
|
||||
using namespace Napi;
|
||||
|
||||
#define SESSION_INSTANCE_METHOD(method) InstanceMethod<&MNNSession::method>(#method, static_cast<napi_property_attributes>(napi_writable | napi_configurable))
|
||||
|
||||
// Maps the binding's portable TensorDataType codes to MNN/Halide element
// types. Used to honour an explicit `type` passed from JavaScript
// (SetInput) and, scanned in reverse, to report a tensor's element type
// back to JS (BuildInputOutputInfo). Unknown deliberately has no entry.
static const std::map<TensorDataType, halide_type_t> DATA_TYPE_MAP = {
    {TensorDataType::Float32, halide_type_of<float>()},
    {TensorDataType::Float64, halide_type_of<double>()},
    {TensorDataType::Int32, halide_type_of<int32_t>()},
    {TensorDataType::Uint32, halide_type_of<uint32_t>()},
    {TensorDataType::Int16, halide_type_of<int16_t>()},
    {TensorDataType::Uint16, halide_type_of<uint16_t>()},
    {TensorDataType::Int8, halide_type_of<int8_t>()},
    {TensorDataType::Uint8, halide_type_of<uint8_t>()},
    {TensorDataType::Int64, halide_type_of<int64_t>()},
    {TensorDataType::Uint64, halide_type_of<uint64_t>()},
};
|
||||
|
||||
// Number of elements described by `shape` (product of its extents).
// An empty shape yields 0. Extents <= 1 are skipped so that placeholder
// values (0 or -1 on axes that have not been resized yet) do not zero out
// or negate the product; skipping a genuine extent of 1 is a no-op anyway.
// Fix: removed a stray empty statement (`;` after the loop body) and
// renamed the misleading accumulator (`sum` -> `count`, it is a product).
static size_t getShapeSize(const std::vector<int> &shape)
{
    if (shape.empty()) return 0;
    size_t count = 1;
    for (int dim : shape) {
        if (dim > 1) count *= static_cast<size_t>(dim);
    }
    return count;
}
|
||||
|
||||
class MNNSessionRunWorker : public AsyncWorker {
|
||||
public:
|
||||
MNNSessionRunWorker(const Napi::Function &callback, MNN::Interpreter *interpreter, MNN::Session *session)
|
||||
: AsyncWorker(callback), interpreter_(interpreter), session_(session) {}
|
||||
|
||||
~MNNSessionRunWorker()
|
||||
{
|
||||
interpreter_->releaseSession(session_);
|
||||
}
|
||||
|
||||
void Execute()
|
||||
{
|
||||
interpreter_->resizeSession(session_);
|
||||
if (MNN::ErrorCode::NO_ERROR != interpreter_->runSession(session_)) {
|
||||
SetError(std::string("Run session failed"));
|
||||
}
|
||||
}
|
||||
|
||||
void OnOK()
|
||||
{
|
||||
if (HasError()) {
|
||||
Callback().Call({Error::New(Env(), errorMessage_.c_str()).Value(), Env().Undefined()});
|
||||
}
|
||||
else {
|
||||
auto result = Object::New(Env());
|
||||
for (auto it : interpreter_->getSessionOutputAll(session_)) {
|
||||
auto tensor = it.second;
|
||||
auto buffer = ArrayBuffer::New(Env(), tensor->size());
|
||||
memcpy(buffer.Data(), tensor->host<float>(), tensor->size());
|
||||
result.Set(it.first, buffer);
|
||||
}
|
||||
Callback().Call({Env().Undefined(), result});
|
||||
}
|
||||
}
|
||||
|
||||
void SetInput(const std::string &name, TensorDataType dataType, const std::vector<int> &shape, void *data, size_t dataBytes)
|
||||
{
|
||||
auto tensor = interpreter_->getSessionInput(session_, name.c_str());
|
||||
if (!tensor) {
|
||||
SetError(std::string("input name #" + name + " not exists"));
|
||||
return;
|
||||
}
|
||||
|
||||
halide_type_t type = tensor->getType();
|
||||
if (dataType != TensorDataType::Unknown) {
|
||||
auto it = DATA_TYPE_MAP.find(dataType);
|
||||
if (it != DATA_TYPE_MAP.end()) type = it->second;
|
||||
}
|
||||
|
||||
if (shape.size()) interpreter_->resizeTensor(tensor, shape);
|
||||
|
||||
auto tensorBytes = getShapeSize(tensor->shape()) * type.bits / 8;
|
||||
if (tensorBytes != dataBytes) {
|
||||
SetError(std::string("input name #" + name + " data size not matched"));
|
||||
return;
|
||||
}
|
||||
|
||||
auto hostTensor = MNN::Tensor::create(tensor->shape(), type, data, MNN::Tensor::CAFFE);
|
||||
tensor->copyFromHostTensor(hostTensor);
|
||||
delete hostTensor;
|
||||
}
|
||||
|
||||
inline void SetError(const std::string &what) { errorMessage_ = what; }
|
||||
inline bool HasError() { return errorMessage_.size() > 0; }
|
||||
|
||||
private:
|
||||
MNN::Interpreter *interpreter_;
|
||||
MNN::Session *session_;
|
||||
std::string errorMessage_;
|
||||
};
|
||||
|
||||
// N-API class wrapping an MNN interpreter plus a CPU inference session.
// JS usage: new MNNSession(pathOrModelBytes), then GetInputsInfo /
// GetOutputsInfo / Run(inputs, callback).
class MNNSession : public ObjectWrap<MNNSession> {
  public:
    static Napi::Object Init(Napi::Env env, Napi::Object exports)
    {
        Function func = DefineClass(env, "MNNSession", {
            SESSION_INSTANCE_METHOD(GetInputsInfo),
            SESSION_INSTANCE_METHOD(GetOutputsInfo),
            SESSION_INSTANCE_METHOD(Run),
        });
        FunctionReference *constructor = new FunctionReference();
        *constructor = Napi::Persistent(func);
        exports.Set("MNNSession", func);
        // `env` takes ownership of `constructor` and frees it at teardown.
        env.SetInstanceData<FunctionReference>(constructor);
        return exports;
    }

    // Accepts a model file path (string) or the model bytes (TypedArray).
    // On load failure interpreter_/session_ stay null; Run() then reports an
    // error instead of crashing.
    MNNSession(const CallbackInfo &info)
        : ObjectWrap(info)
    {
        try {
            if (info[0].IsString()) {
                interpreter_ = MNN::Interpreter::createFromFile(info[0].As<String>().Utf8Value().c_str());
            }
            else if (info[0].IsTypedArray()) {
                size_t bufferBytes;
                auto buffer = dataFromTypedArray(info[0], bufferBytes);
                interpreter_ = MNN::Interpreter::createFromBuffer(buffer, bufferBytes);
            }
            else interpreter_ = nullptr;

            if (interpreter_) {
                backendConfig_.precision = MNN::BackendConfig::Precision_High;
                backendConfig_.power = MNN::BackendConfig::Power_High;
                scheduleConfig_.type = MNN_FORWARD_CPU;
                scheduleConfig_.numThread = 1;
                scheduleConfig_.backendConfig = &backendConfig_;
                // This session serves GetInputsInfo/GetOutputsInfo only;
                // Run() creates a fresh session per call.
                session_ = interpreter_->createSession(scheduleConfig_);
            }
            else session_ = nullptr;
        }
        catch (std::exception &e) {
            Error::New(info.Env(), e.what()).ThrowAsJavaScriptException();
        }
    }

    // Fix: the original empty destructor leaked both session_ and
    // interpreter_ whenever a JS MNNSession was garbage-collected.
    // NOTE(review): workers queued by Run() borrow interpreter_; JS callers
    // must keep the session object alive until their callbacks fire.
    ~MNNSession()
    {
        if (interpreter_) {
            if (session_) interpreter_->releaseSession(session_);
            delete interpreter_;
        }
    }

    Napi::Value GetInputsInfo(const Napi::CallbackInfo &info) { return BuildInputOutputInfo(info.Env(), interpreter_->getSessionInputAll(session_)); }

    Napi::Value GetOutputsInfo(const Napi::CallbackInfo &info) { return BuildInputOutputInfo(info.Env(), interpreter_->getSessionOutputAll(session_)); }

    // Run(inputs, callback): inputs is {name: {data: TypedArray, type?:
    // number, shape?: number[]}}. Inference runs on a worker thread; the
    // callback receives (error, {name: ArrayBuffer}).
    Napi::Value Run(const Napi::CallbackInfo &info)
    {
        // Fix: fail cleanly when model loading failed in the constructor
        // instead of dereferencing a null interpreter_.
        if (!interpreter_) {
            Error::New(info.Env(), "MNNSession is not initialized").ThrowAsJavaScriptException();
            return info.Env().Undefined();
        }
        auto worker = new MNNSessionRunWorker(info[1].As<Function>(), interpreter_, interpreter_->createSession(scheduleConfig_));
        auto inputArgument = info[0].As<Object>();
        for (auto it = inputArgument.begin(); it != inputArgument.end(); ++it) {
            auto name = (*it).first.As<String>().Utf8Value();
            auto inputOption = static_cast<Napi::Value>((*it).second).As<Object>();
            // Fix: validate `data` (as the ONNXRuntime binding does) instead
            // of calling dataFromTypedArray on a missing/non-TypedArray value.
            if (!inputOption.Has("data") || !inputOption.Get("data").IsTypedArray()) {
                worker->SetError(std::string("data is required in inputs #" + name));
                continue;
            }
            auto type = inputOption.Has("type") ? static_cast<TensorDataType>(inputOption.Get("type").As<Number>().Int32Value()) : TensorDataType::Unknown;
            size_t dataByteLen;
            void *data = dataFromTypedArray(inputOption.Get("data"), dataByteLen);
            auto shape = inputOption.Has("shape") ? GetShapeFromJavascript(inputOption.Get("shape").As<Array>()) : std::vector<int>();
            worker->SetInput(name, type, shape, data, dataByteLen);
        }
        worker->Queue();
        return info.Env().Undefined();
    }

  private:
    // Builds {name: {name, shape: number[], type: TensorDataType-as-int}}
    // for a set of session tensors (inputs or outputs).
    Napi::Object BuildInputOutputInfo(Napi::Env env, const std::map<std::string, MNN::Tensor *> &tensors)
    {
        auto result = Object::New(env);
        for (auto it : tensors) {
            auto item = Object::New(env);
            auto name = it.first;
            auto shape = it.second->shape();
            auto type = it.second->getType();
            // Reverse lookup: halide element type -> portable code.
            TensorDataType dataType = TensorDataType::Unknown;
            for (auto dt : DATA_TYPE_MAP) {
                if (dt.second == type) {
                    dataType = dt.first;
                    break;
                }
            }
            auto shapeArr = Array::New(env, shape.size());
            for (size_t i = 0; i < shape.size(); i++) {
                shapeArr.Set(i, Number::New(env, shape[i]));
            }
            item.Set("name", String::New(env, name));
            item.Set("shape", shapeArr);
            item.Set("type", Number::New(env, static_cast<int>(dataType)));
            result.Set(name, item);
        }
        return result;
    }

    // Converts a JS number[] into a std::vector<int> shape.
    std::vector<int> GetShapeFromJavascript(const Napi::Array &shape)
    {
        std::vector<int> result;
        for (size_t i = 0; i < shape.Length(); i++) {
            result.push_back(shape.Get(i).As<Number>().Int32Value());
        }
        return result;
    }

  private:
    MNN::Interpreter *interpreter_;
    MNN::Session *session_;
    MNN::BackendConfig backendConfig_;
    MNN::ScheduleConfig scheduleConfig_;
};
|
||||
|
||||
// Registers the MNNSession class on `exports`. Called either by the
// aggregated addon entry point (cxx/node.cc, BUILD_MAIN_WORD) or by the
// standalone module initializer below.
void InstallMNNAPI(Napi::Env env, Napi::Object exports)
{
    MNNSession::Init(env, exports);
}
|
||||
|
||||
|
||||
#if defined(USE_MNN) && not defined(BUILD_MAIN_WORD)
|
||||
static Object Init(Env env, Object exports)
|
||||
{
|
||||
InstallMNNAPI(env, exports);
|
||||
return exports;
|
||||
}
|
||||
NODE_API_MODULE(addon, Init)
|
||||
#endif
|
8
cxx/mnn/node.h
Normal file
8
cxx/mnn/node.h
Normal file
@ -0,0 +1,8 @@
|
||||
#ifndef __MNN_NODE_H__
#define __MNN_NODE_H__

#include "common/node.h"

// Public entry point of the MNN bindings: registers the MNNSession class on
// `exports`. Called from cxx/node.cc when built with USE_MNN.
void InstallMNNAPI(Napi::Env env, Napi::Object exports);

#endif
|
@ -1,19 +0,0 @@
|
||||
#ifndef __MNN_SESSION_H__
|
||||
#define __MNN_SESSION_H__
|
||||
|
||||
#include <MNN/Interpreter.hpp>
|
||||
#include <MNN/ImageProcess.hpp>
|
||||
|
||||
#include "common/session.h"
|
||||
|
||||
namespace ai
|
||||
{
|
||||
class MNNSession : public Session
|
||||
{
|
||||
public:
|
||||
MNNSession(const void *modelData, size_t size);
|
||||
~MNNSession();
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
87
cxx/node.cc
87
cxx/node.cc
@ -1,65 +1,28 @@
|
||||
// #include <unistd.h>
|
||||
// #include <napi.h>
|
||||
// #include "cv/node.h"
|
||||
// #ifdef USE_ORT
|
||||
// #include "ort/node.h"
|
||||
// #endif
|
||||
#include "common/node.h"
|
||||
#include "cv/node.h"
|
||||
#include "mnn/node.h"
|
||||
#include "ort/node.h"
|
||||
|
||||
// using namespace Napi;
|
||||
using namespace Napi;
|
||||
|
||||
// class TestWork : public AsyncWorker
|
||||
// {
|
||||
// public:
|
||||
// TestWork(const Napi::Function &callback, int value) : Napi::AsyncWorker(callback), val_(value) {}
|
||||
// ~TestWork() {}
|
||||
// Aggregated addon entry point, used when all enabled backends are linked
// into a single binary (BUILD_MAIN_WORD). Each backend registers its
// classes on the shared `exports` object; standalone per-backend entry
// points live in the individual cxx/<backend>/node.cc files.
// Fix: removed ~70 lines of dead commented-out legacy scaffolding
// (TestWork demo worker, old macro-based Init) interleaved in the function.
#if defined(BUILD_MAIN_WORD)
Object Init(Env env, Object exports)
{
    // OpenCV
#ifdef USE_OPENCV
    printf("use opencv\n");  // NOTE(review): debug trace — consider removing
    InstallOpenCVAPI(env, exports);
#endif
    // OnnxRuntime
#ifdef USE_ONNXRUNTIME
    InstallOrtAPI(env, exports);
#endif
    // MNN
#ifdef USE_MNN
    InstallMNNAPI(env, exports);
#endif

    return exports;
}
NODE_API_MODULE(addon, Init)
#endif
|
100
cxx/ort/node.cc
100
cxx/ort/node.cc
@ -2,6 +2,7 @@
|
||||
#include <vector>
|
||||
#include <onnxruntime_cxx_api.h>
|
||||
#include "node.h"
|
||||
#include "common/tensor.h"
|
||||
|
||||
#ifdef WIN32
|
||||
#include <locale>
|
||||
@ -13,51 +14,31 @@ using namespace Napi;
|
||||
|
||||
#define SESSION_INSTANCE_METHOD(method) InstanceMethod<&OrtSession::method>(#method, static_cast<napi_property_attributes>(napi_writable | napi_configurable))
|
||||
|
||||
static ONNXTensorElementDataType getDataTypeFromString(const std::string &name)
|
||||
{
|
||||
static const std::map<std::string, ONNXTensorElementDataType> dataTypeNameMap = {
|
||||
{"float32", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT},
|
||||
{"float", ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT},
|
||||
{"float64", ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE},
|
||||
{"double", ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE},
|
||||
{"int8", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8},
|
||||
{"uint8", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8},
|
||||
{"int16", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16},
|
||||
{"uint16", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16},
|
||||
{"int32", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32},
|
||||
{"uint32", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32},
|
||||
{"int64", ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64},
|
||||
{"uint64", ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64},
|
||||
};
|
||||
auto it = dataTypeNameMap.find(name);
|
||||
return (it == dataTypeNameMap.end()) ? ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED : it->second;
|
||||
}
|
||||
// Maps the binding's portable TensorDataType codes to ONNXRuntime element
// types; mirrors the halide table in cxx/mnn/node.cc so the same JS `type`
// codes work for both backends. Unknown deliberately has no entry.
static const std::map<TensorDataType, ONNXTensorElementDataType> DATA_TYPE_MAP = {
    {TensorDataType::Float32, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT},
    {TensorDataType::Float64, ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE},
    {TensorDataType::Int32, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32},
    {TensorDataType::Uint32, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32},
    {TensorDataType::Int16, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16},
    {TensorDataType::Uint16, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16},
    {TensorDataType::Int8, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8},
    {TensorDataType::Uint8, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8},
    {TensorDataType::Int64, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64},
    {TensorDataType::Uint64, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64},
};
|
||||
|
||||
static size_t getDataTypeSize(ONNXTensorElementDataType type)
|
||||
{
|
||||
static const std::map<ONNXTensorElementDataType, size_t> dataTypeSizeMap = {
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, 4},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, 8},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, 1},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, 1},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, 2},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, 2},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, 4},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, 4},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, 8},
|
||||
{ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, 8},
|
||||
};
|
||||
auto it = dataTypeSizeMap.find(type);
|
||||
return (it == dataTypeSizeMap.end()) ? 0 : it->second;
|
||||
}
|
||||
|
||||
static void *dataFromTypedArray(const Napi::Value &val, size_t &bytes)
|
||||
{
|
||||
auto arr = val.As<TypedArray>();
|
||||
auto data = static_cast<uint8_t *>(arr.ArrayBuffer().Data());
|
||||
bytes = arr.ByteLength();
|
||||
return static_cast<void *>(data + arr.ByteOffset());
|
||||
}
|
||||
// Size in bytes of each supported ONNXRuntime element type; consulted by
// OrtSessionNodeInfo::GetElementSize, which returns 0 for unlisted types.
static const std::map<ONNXTensorElementDataType, size_t> DATA_TYPE_SIZE_MAP = {
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, 4},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, 8},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, 1},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, 1},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, 2},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, 2},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, 4},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, 4},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, 8},
    {ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, 8},
};
|
||||
|
||||
class OrtSessionNodeInfo {
|
||||
public:
|
||||
@ -67,7 +48,22 @@ class OrtSessionNodeInfo {
|
||||
inline const std::string &GetName() const { return name_; }
|
||||
inline const std::vector<int64_t> &GetShape() const { return shape_; }
|
||||
inline ONNXTensorElementDataType GetType() const { return type_; }
|
||||
inline size_t GetElementSize() const { return getDataTypeSize(type_); }
|
||||
// Byte width of one element of this node's ONNX type; 0 when the type is
// not listed in DATA_TYPE_SIZE_MAP.
inline size_t GetElementSize() const
{
    auto it = DATA_TYPE_SIZE_MAP.find(type_);
    return (it == DATA_TYPE_SIZE_MAP.end()) ? 0 : it->second;
}
|
||||
// Reverse-maps this node's ONNX element type to the portable
// TensorDataType code exposed to JavaScript; Unknown when no entry of
// DATA_TYPE_MAP matches.
TensorDataType GetDataType() const
{
    for (const auto &entry : DATA_TYPE_MAP) {
        if (entry.second == type_) return entry.first;
    }
    return TensorDataType::Unknown;
}
|
||||
size_t GetElementCount() const
|
||||
{
|
||||
if (!shape_.size()) return 0;
|
||||
@ -115,7 +111,8 @@ class OrtSessionRunWorker : public AsyncWorker {
|
||||
for (int i = 0; i < outputNames_.size(); ++i) {
|
||||
size_t bytes = outputElementBytes_[i];
|
||||
Ort::Value &value = outputValues_[i];
|
||||
auto buffer = ArrayBuffer::New(Env(), value.GetTensorMutableRawData(), bytes);
|
||||
auto buffer = ArrayBuffer::New(Env(), bytes);
|
||||
memcpy(buffer.Data(), value.GetTensorMutableRawData(), bytes);
|
||||
result.Set(String::New(Env(), outputNames_[i]), buffer);
|
||||
}
|
||||
Callback().Call({Env().Undefined(), result});
|
||||
@ -236,7 +233,12 @@ class OrtSession : public ObjectWrap<OrtSession> {
|
||||
auto inputOption = static_cast<Napi::Value>((*it).second).As<Object>();
|
||||
if (!inputOption.Has("data") || !inputOption.Get("data").IsTypedArray()) worker->SetError((std::string("data is required in inputs #" + name)));
|
||||
else {
|
||||
auto type = inputOption.Has("type") ? getDataTypeFromString(inputOption.Get("type").As<String>().Utf8Value()) : input->GetType();
|
||||
auto type = input->GetType();
|
||||
if (inputOption.Has("type")) {
|
||||
auto t = static_cast<TensorDataType>(inputOption.Get("type").As<Number>().Int32Value());
|
||||
auto it = DATA_TYPE_MAP.find(t);
|
||||
if (it != DATA_TYPE_MAP.end()) type = it->second;
|
||||
}
|
||||
size_t dataByteLen;
|
||||
void *data = dataFromTypedArray(inputOption.Get("data"), dataByteLen);
|
||||
auto shape = inputOption.Has("shape") ? GetShapeFromJavascript(inputOption.Get("shape").As<Array>()) : input->GetShape();
|
||||
@ -279,7 +281,7 @@ class OrtSession : public ObjectWrap<OrtSession> {
|
||||
auto &node = *it.second;
|
||||
auto item = Object::New(env);
|
||||
item.Set(String::New(env, "name"), String::New(env, node.GetName()));
|
||||
item.Set(String::New(env, "type"), Number::New(env, node.GetType()));
|
||||
item.Set(String::New(env, "type"), Number::New(env, static_cast<int>(node.GetDataType())));
|
||||
auto &shapeVec = node.GetShape();
|
||||
auto shape = Array::New(env, shapeVec.size());
|
||||
for (int i = 0; i < shapeVec.size(); ++i) shape.Set(i, Number::New(env, shapeVec[i]));
|
||||
@ -303,7 +305,7 @@ void InstallOrtAPI(Napi::Env env, Napi::Object exports)
|
||||
OrtSession::Init(env, exports);
|
||||
}
|
||||
|
||||
#ifdef USE_ONNXRUNTIME
|
||||
#if defined(USE_ONNXRUNTIME) && not defined(BUILD_MAIN_WORD)
|
||||
static Object Init(Env env, Object exports)
|
||||
{
|
||||
InstallOrtAPI(env, exports);
|
||||
|
Reference in New Issue
Block a user