
Commit 809793c

Merge pull request #3173 from reyoung/feature/move_pybind_to_framework_dir
Move pybind.cc/tensor_bind.h to paddle::framework
2 parents 05af390 + fe5bca4 commit 809793c

5 files changed: 71 additions, 76 deletions


paddle/CMakeLists.txt (0 additions, 1 deletion)

@@ -15,7 +15,6 @@ if(Boost_FOUND)
   add_subdirectory(platform)
   add_subdirectory(framework)
   add_subdirectory(operators)
-  add_subdirectory(pybind)
 endif()
 
 if(WITH_C_API)

paddle/framework/CMakeLists.txt (9 additions, 0 deletions)

@@ -36,3 +36,12 @@ cc_test(net_op_test SRCS net_op_test.cc DEPS net)
 
 cc_library(backward SRCS backward.cc DEPS net)
 cc_test(backward_test SRCS backward_test.cc DEPS backward)
+cc_library(paddle_pybind SHARED
+    SRCS pybind.cc
+    DEPS pybind python backward
+    fc_op
+    sgd_op
+    add_op
+    mean_op
+    cross_entropy_op
+    recurrent_op)
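
The new paddle_pybind target links the binding code and the operator libraries into a single shared object. For orientation, a minimal sketch of the module entry point such a shared library exports is shown below; it mirrors the skeleton of paddle/framework/pybind.cc in this commit (PYBIND11_PLUGIN is the module-init macro of the pybind11 generation used here; later pybind11 releases replaced it with PYBIND11_MODULE).

// Minimal sketch, not the full binding file.
#include "pybind11/pybind11.h"

namespace py = pybind11;

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");
  // py::class_ and m.def registrations go here (see pybind.cc below).
  return m.ptr();
}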

paddle/pybind/pybind.cc renamed to paddle/framework/pybind.cc (52 additions, 60 deletions)

@@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,15 +21,14 @@ limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
+#include "paddle/framework/tensor_py.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
-#include "paddle/pybind/tensor_bind.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
 
 namespace py = pybind11;
-namespace pd = paddle::framework;
 
 USE_OP(add_two);
 USE_OP(onehot_cross_entropy);
@@ -41,17 +40,18 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 USE_OP(rowwise_add);
 USE_OP_WITHOUT_KERNEL(recurrent_op);
-
+namespace paddle {
+namespace framework {
 template <typename ClassType>
-void ExposeOperator(ClassType& m) {
+void ExposeOperator(ClassType &m) {
   m.def("infer_shape", &ClassType::type::InferShape)
       .def("run", &ClassType::type::Run)
       .def("type",
-           [](const typename ClassType::type& op) -> std::string {
+           [](const typename ClassType::type &op) -> std::string {
             return op.type_;
           })
       .def("outputs",
-           [](const typename ClassType::type& op) -> std::vector<std::string> {
+           [](const typename ClassType::type &op) -> std::vector<std::string> {
             return op.outputs_;
           })
       .def("__str__", &ClassType::type::DebugString);
@@ -73,80 +73,70 @@ bool IsCompileGPU() {
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");
 
-  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
-      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
-        return paddle::pybind::CastToPyBuffer(self);
-      })
+  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
+      .def_buffer(
+          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
       .def("get_dims",
-           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
+           [](const Tensor &self) { return vectorize(self.dims()); })
       .def("set_dims",
-           [](pd::Tensor& self, const std::vector<int>& dim) {
-             self.Resize(pd::make_ddim(dim));
+           [](Tensor &self, const std::vector<int> &dim) {
+             self.Resize(make_ddim(dim));
           })
       .def("alloc_float",
-           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
+           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
       .def("alloc_float",
-           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
+           [](Tensor &self, paddle::platform::CPUPlace &place) {
            self.mutable_data<float>(place);
          })
       .def("alloc_int",
-           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
+           [](Tensor &self, paddle::platform::CPUPlace &place) {
            self.mutable_data<int>(place);
          })
       .def("alloc_int",
-           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
+           [](Tensor &self, paddle::platform::GPUPlace &place) {
            self.mutable_data<int>(place);
          })
-      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
-      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
+      .def("set", PyCPUTensorSetFromArray<float>)
+      .def("set", PyCPUTensorSetFromArray<int>)
 #ifndef PADDLE_ONLY_CPU
-      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
-      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
+      .def("set", PyCUDATensorSetFromArray<float>)
+      .def("set", PyCUDATensorSetFromArray<int>)
 #endif
-      .def("shape",
-           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });
+      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });
 
-  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.
+  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
 
 All parameter, weight, gradient are variables in Paddle.
 )DOC")
-      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
+      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
       .def("set_int",
-           [](pd::Variable& var, int val) -> void {
-             *var.GetMutable<int>() = val;
-           })
-      .def("get_int",
-           [](const pd::Variable& var) -> int { return var.Get<int>(); })
+           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
+      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
       .def("get_tensor",
-           [](pd::Variable& self) -> pd::Tensor* {
-             return self.GetMutable<pd::Tensor>();
-           },
+           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
       .def("get_net",
-           [](pd::Variable& self) -> pd::NetOp* {
-             return self.GetMutable<pd::NetOp>();
-           },
+           [](Variable &self) -> NetOp * { return self.GetMutable<NetOp>(); },
          py::return_value_policy::reference);
 
-  py::class_<pd::Scope>(m, "Scope", "")
+  py::class_<Scope>(m, "Scope", "")
       .def("new_var",
-           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
+           [](Scope &self, const std::string &name) -> Variable * {
            return self.NewVar(name);
          },
          py::return_value_policy::reference)
-      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
+      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
       .def(py::init<>())
-      .def("new_scope",
-           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
+      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
          py::return_value_policy::reference)
-      .def("drop_kids", &pd::Scope::DropKids);
+      .def("drop_kids", &Scope::DropKids);
 
   //! @note: Be careful! PyBind will return std::string as an unicode, not
   //! Python str. If you want a str object, you should cast them in Python.
   m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
-    auto& protos = pd::OpRegistry::protos();
+    auto &protos = OpRegistry::protos();
     std::vector<py::bytes> ret_values;
     for (auto it = protos.begin(); it != protos.end(); ++it) {
       PADDLE_ENFORCE(it->second.IsInitialized(),
@@ -161,8 +151,8 @@ All parameter, weight, gradient are variables in Paddle.
   m.def_submodule(
        "var_names",
        "The module will return special predefined variable name in Paddle")
-      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
-      .def("temp", pd::OperatorBase::TMP_VAR_NAME);
+      .def("empty", OperatorBase::EMPTY_VAR_NAME)
+      .def("temp", OperatorBase::TMP_VAR_NAME);
   // clang-format off
   py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
       .def_static("create",
@@ -185,43 +175,43 @@ All parameter, weight, gradient are variables in Paddle.
 
   py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
 
-  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
+  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
       m, "Operator");
 
   operator_base.def_static("create", [](py::bytes protobin) {
-    pd::OpDesc desc;
+    OpDesc desc;
     PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                    "Cannot parse user input to OpDesc");
     PADDLE_ENFORCE(desc.IsInitialized(),
                    "User OpDesc is not initialized, reason %s",
                    desc.InitializationErrorString());
-    return pd::OpRegistry::CreateOp(desc);
+    return OpRegistry::CreateOp(desc);
   });
 
   operator_base.def("backward",
-                    [](const pd::OperatorBase& forwardOp,
-                       const std::unordered_set<std::string>& no_grad_vars) {
-                      return pd::Backward(forwardOp, no_grad_vars);
+                    [](const OperatorBase &forwardOp,
+                       const std::unordered_set<std::string> &no_grad_vars) {
+                      return Backward(forwardOp, no_grad_vars);
                     });
 
   ExposeOperator(operator_base);
 
-  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");
+  py::class_<NetOp, std::shared_ptr<NetOp>> net(m, "Net");
 
   net.def_static("create",
-                 []() -> std::shared_ptr<pd::NetOp> {
-                   auto retv = std::make_shared<pd::NetOp>();
+                 []() -> std::shared_ptr<NetOp> {
+                   auto retv = std::make_shared<NetOp>();
                    retv->type_ = "plain_net";
                    return retv;
                  })
-      .def("add_op", &pd::NetOp::AddOp)
+      .def("add_op", &NetOp::AddOp)
       .def("add_op",
-           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
-             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
+           [](NetOp &self, const std::shared_ptr<NetOp> &net) -> void {
+             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
           })
-      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
+      .def("complete_add_op", &NetOp::CompleteAddOp)
       .def("complete_add_op",
-           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
+           [](std::shared_ptr<NetOp> &self) { self->CompleteAddOp(); });
   ExposeOperator(net);
 
   m.def("unique_integer", UniqueIntegerGenerator);
@@ -230,3 +220,5 @@ All parameter, weight, gradient are variables in Paddle.
 
   return m.ptr();
 }
+}  // namespace framework
+}  // namespace paddle
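
The ExposeOperator helper above registers one shared set of methods on several distinct py::class_ bindings (Operator and Net). A self-contained sketch of that pattern follows; the names Op, op_class, and demo are illustrative, not Paddle's real types.

#include <memory>
#include <string>

#include "pybind11/pybind11.h"

namespace py = pybind11;

struct Op {
  std::string type_;
  std::string DebugString() const { return "op: " + type_; }
};

// Registers a shared set of methods on any py::class_ whose wrapped type
// (ClassType::type) exposes type_ and DebugString().
template <typename ClassType>
void ExposeCommon(ClassType &c) {
  c.def("type",
        [](const typename ClassType::type &op) -> std::string {
          return op.type_;
        })
      .def("__str__", &ClassType::type::DebugString);
}

PYBIND11_PLUGIN(demo) {
  py::module m("demo", "sketch of the shared-binding helper pattern");
  py::class_<Op, std::shared_ptr<Op>> op_class(m, "Operator");
  op_class.def(py::init<>());
  ExposeCommon(op_class);  // the same helper could decorate Net, etc.
  return m.ptr();
}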

paddle/framework/tensor.h (5 additions, 7 deletions)

@@ -26,19 +26,17 @@ limitations under the License. */
 #include "unsupported/Eigen/CXX11/Tensor"
 
 namespace paddle {
-namespace pybind {
-namespace details {  // forward declare
-template <bool less, size_t i, typename... args>
-struct CastToPyBufferImpl;
-}  // namespace details
-}  // namespace pybind
 
 namespace framework {
+namespace details {
+template <bool less, size_t i, typename... args>
+struct CastToPyBufferImpl;
+}
 
 class Tensor {
  public:
   template <bool less, size_t i, typename... args>
-  friend struct paddle::pybind::details::CastToPyBufferImpl;
+  friend struct details::CastToPyBufferImpl;
 
   template <typename T, size_t D, int MajorType, typename IndexType>
   friend struct EigenTensor;
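
The header keeps Tensor's internals private while still letting the binding code reach them: CastToPyBufferImpl is forward-declared in a nested details namespace (now framework::details rather than pybind::details) and befriended, so tensor.h never has to include the pybind11-heavy definition. A standalone sketch of the pattern, with illustrative names (sketch, holder_ as a stand-in member):

#include <cstddef>

namespace sketch {
namespace details {
template <bool less, std::size_t i, typename... args>
struct CastToPyBufferImpl;  // defined elsewhere, e.g. in tensor_py.h
}  // namespace details

class Tensor {
 public:
  template <bool less, std::size_t i, typename... args>
  friend struct details::CastToPyBufferImpl;  // may touch holder_ directly

 private:
  void *holder_ = nullptr;  // stand-in for the real allocation holder
};
}  // namespace sketch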

paddle/pybind/tensor_bind.h renamed to paddle/framework/tensor_py.h (5 additions, 8 deletions)

@@ -23,7 +23,7 @@ namespace py = pybind11;
 
 namespace paddle {
 
-namespace pybind {
+namespace framework {
 
 namespace details {
 
@@ -63,11 +63,8 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
     }
     return py::buffer_info(
         dst_tensor.mutable_data<CUR_TYPE>(dst_tensor.holder_->place()),
-        sizeof(CUR_TYPE),
-        py::format_descriptor<CUR_TYPE>::format(),
-        (size_t)framework::arity(dst_tensor.dims()),
-        dims_outside,
-        strides);
+        sizeof(CUR_TYPE), py::format_descriptor<CUR_TYPE>::format(),
+        (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides);
   } else {
     constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;
     return CastToPyBufferImpl<less, I + 1, ARGS...>()(tensor);
@@ -110,8 +107,8 @@ void PyCUDATensorSetFromArray(
 
   self.Resize(framework::make_ddim(dims));
   auto *dst = self.mutable_data<T>(place);
-  paddle::platform::GpuMemcpySync(
-      dst, array.data(), sizeof(T) * array.size(), cudaMemcpyHostToDevice);
+  paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(),
+                                  cudaMemcpyHostToDevice);
 }
 #endif
 
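
CastToPyBufferImpl walks a compile-time list of candidate element types, recursing from index I to I + 1 until the tensor's runtime element type matches, and terminating through the `less` flag once the list is exhausted. A simplified, pybind11-free sketch of that dispatch technique (FakeTensor and the print statements are illustrative stand-ins):

#include <cstddef>
#include <cstdio>
#include <tuple>
#include <typeinfo>

struct FakeTensor {
  const std::type_info *elem_type;  // runtime element type of the data
};

// Primary template: reached when I runs past the end of the type list.
template <bool less, std::size_t I, typename... ARGS>
struct CastImpl {
  void operator()(const FakeTensor &) const {
    std::puts("unsupported element type");
  }
};

// Specialization for less == true: try the I-th candidate type.
template <std::size_t I, typename... ARGS>
struct CastImpl<true, I, ARGS...> {
  void operator()(const FakeTensor &t) const {
    using CUR_TYPE = typename std::tuple_element<I, std::tuple<ARGS...>>::type;
    if (*t.elem_type == typeid(CUR_TYPE)) {
      std::printf("matched candidate %zu (size %zu)\n", I, sizeof(CUR_TYPE));
    } else {
      constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;
      CastImpl<less, I + 1, ARGS...>()(t);  // try the next candidate type
    }
  }
};

int main() {
  FakeTensor t{&typeid(int)};
  CastImpl<true, 0, float, int>()(t);  // prints: matched candidate 1 (size 4)
}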
