1 change: 1 addition & 0 deletions paddle/fluid/inference/api/paddle_api.h
@@ -36,6 +36,7 @@ namespace paddle {
 
 using PaddleDType = paddle_infer::DataType;
 using PaddlePlace = paddle_infer::PlaceType;
+using PaddleDataLayout = paddle_infer::DataLayout;
 
 /// \brief Memory manager for PaddleTensor.
 ///
40 changes: 40 additions & 0 deletions paddle/fluid/pybind/inference_api.cc
@@ -76,6 +76,7 @@ using paddle::NativeConfig;
 using paddle::NativePaddlePredictor;
 using paddle::PaddleBuf;
 using paddle::PaddleDType;
+using paddle::PaddleDataLayout;
 using paddle::PaddlePassBuilder;
 using paddle::PaddlePlace;
 using paddle::PaddlePredictor;
@@ -85,6 +86,7 @@ using paddle::ZeroCopyTensor;
 
 namespace {
 void BindPaddleDType(py::module *m);
+void BindPaddleDataLayout(py::module *m);
 void BindPaddleBuf(py::module *m);
 void BindPaddleTensor(py::module *m);
 void BindPaddlePlace(py::module *m);
@@ -211,6 +213,34 @@ void PaddleInferTensorCreate(
   tensor.CopyFromCpu(static_cast<const T *>(data.data()));
 }
 
+paddle_infer::PlaceType ToPaddleInferPlace(
+    phi::AllocationType allocation_type) {
+  if (allocation_type == phi::AllocationType::CPU) {
+    return paddle_infer::PlaceType::kCPU;
+  } else if (allocation_type == phi::AllocationType::GPU) {
+    return paddle_infer::PlaceType::kGPU;
+  } else {
+    return paddle_infer::PlaceType::kCPU;
+  }
+}
+
+void PaddleInferShareExternalData(paddle_infer::Tensor &tensor,  // NOLINT
+                                  framework::Tensor input_tensor) {
+  std::vector<int> shape;
+  for (int i = 0; i < input_tensor.dims().size(); ++i) {
+    shape.push_back(input_tensor.dims()[i]);
+  }
+  if (input_tensor.dtype() == phi::DataType::FLOAT32) {
+    tensor.ShareExternalData(
+        static_cast<float *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::FLOAT16) {
+    tensor.ShareExternalData(
+        static_cast<paddle::platform::float16 *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  }
+}
+
 /// \brief Experimental interface.
 /// Create the Strings tensor from data.
 /// \param tensor The tensor will be created and
@@ -327,6 +357,7 @@ void CopyPaddleInferTensor(paddle_infer::Tensor &dst,  // NOLINT
 
 void BindInferenceApi(py::module *m) {
   BindPaddleDType(m);
+  BindPaddleDataLayout(m);
   BindPaddleBuf(m);
   BindPaddleTensor(m);
   BindPaddlePlace(m);
@@ -372,6 +403,14 @@ void BindPaddleDType(py::module *m) {
      .value("INT32", PaddleDType::INT32);
 }
 
+void BindPaddleDataLayout(py::module *m) {
+  py::enum_<PaddleDataLayout>(*m, "PaddleDataLayout")
+      .value("UNK", PaddleDataLayout::kUNK)
+      .value("Any", PaddleDataLayout::kAny)
+      .value("NHWC", PaddleDataLayout::kNHWC)
+      .value("NCHW", PaddleDataLayout::kNCHW);
+}
+
 void BindPaddleBuf(py::module *m) {
   py::class_<PaddleBuf>(*m, "PaddleBuf")
       .def(py::init<size_t>())
@@ -817,6 +856,7 @@ void BindPaddleInferTensor(py::module *m) {
       .def("copy_from_cpu_bind",
            &PaddleInferTensorCreate<paddle_infer::float16>)
       .def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
+      .def("share_external_data_bind", &PaddleInferShareExternalData)
       .def("copy_to_cpu", &PaddleInferTensorToNumpy)
       .def("shape", &paddle_infer::Tensor::shape)
       .def("set_lod", &paddle_infer::Tensor::SetLoD)
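Once these bindings are registered, the new symbols surface in paddle.fluid.core. A minimal sketch of what becomes visible from Python, assuming a Paddle build that includes this change (printed values are illustrative):

    import paddle.fluid.core as core

    # The enum registered by BindPaddleDataLayout above.
    print(core.PaddleDataLayout.NCHW)  # PaddleDataLayout.NCHW
    print(core.PaddleDataLayout.UNK)

    # The raw method bound onto PaddleInferTensor; the user-facing
    # share_external_data wrapper is attached in wrapper.py below.
    print(hasattr(core.PaddleInferTensor, "share_external_data_bind"))  # True

Note that PaddleInferShareExternalData dispatches only on FLOAT32 and FLOAT16: a tensor of any other dtype falls through both branches and is silently left unshared.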
13 changes: 13 additions & 0 deletions python/paddle/fluid/inference/wrapper.py
@@ -14,6 +14,7 @@
 
 from ..core import AnalysisConfig, PaddleDType, PaddlePlace
 from ..core import PaddleInferPredictor, PaddleInferTensor
+from .. import core
 
 import numpy as np
 
@@ -39,4 +40,16 @@ def tensor_copy_from_cpu(self, data):
         )
 
 
+def tensor_share_external_data(self, data):
+    '''
+    Support input type check based on tensor.share_external_data.
+    '''
+    if isinstance(data, core.LoDTensor):
+        self.share_external_data_bind(data)
+    else:
+        raise TypeError(
+            "In share_external_data, we only support LoDTensor data type.")
+
+
 Tensor.copy_from_cpu = tensor_copy_from_cpu
+Tensor.share_external_data = tensor_share_external_data
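With the wrapper in place, share_external_data mirrors the calling convention of copy_from_cpu but shares the LoDTensor's underlying buffer instead of copying it. A minimal end-to-end sketch, assuming a build with this change and hypothetical model files ./model.pdmodel and ./model.pdiparams:

    import numpy as np
    import paddle.fluid.core as core
    from paddle.inference import Config, create_predictor

    # Hypothetical model paths; any exported inference model would do.
    config = Config("./model.pdmodel", "./model.pdiparams")
    predictor = create_predictor(config)

    # Wrap float32 data in a LoDTensor -- the only input type the
    # wrapper accepts; anything else raises TypeError.
    data = np.random.rand(1, 3, 224, 224).astype("float32")  # illustrative shape
    lod_tensor = core.LoDTensor()
    lod_tensor.set(data, core.CPUPlace())

    input_name = predictor.get_input_names()[0]
    input_handle = predictor.get_input_handle(input_name)
    input_handle.share_external_data(lod_tensor)  # shares the buffer, no copy
    predictor.run()

Because the buffer is shared rather than copied, the LoDTensor must stay alive until predictor.run() completes.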