@@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,15 +21,14 @@ limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
+#include "paddle/framework/tensor_py.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
-#include "paddle/pybind/tensor_bind.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"

 namespace py = pybind11;
-namespace pd = paddle::framework;

 USE_OP(add_two);
 USE_OP(onehot_cross_entropy);
@@ -41,17 +40,18 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 USE_OP(rowwise_add);
 USE_OP_WITHOUT_KERNEL(recurrent_op);
-
+namespace paddle {
+namespace framework {
 template <typename ClassType>
-void ExposeOperator(ClassType& m) {
+void ExposeOperator(ClassType &m) {
   m.def("infer_shape", &ClassType::type::InferShape)
       .def("run", &ClassType::type::Run)
       .def("type",
-           [](const typename ClassType::type& op) -> std::string {
+           [](const typename ClassType::type &op) -> std::string {
             return op.type_;
           })
       .def("outputs",
-           [](const typename ClassType::type& op) -> std::vector<std::string> {
+           [](const typename ClassType::type &op) -> std::vector<std::string> {
            return op.outputs_;
          })
       .def("__str__", &ClassType::type::DebugString);
@@ -73,80 +73,70 @@ bool IsCompileGPU() {
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");

-  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
-      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
-        return paddle::pybind::CastToPyBuffer(self);
-      })
+  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
+      .def_buffer(
+          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
       .def("get_dims",
-           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
+           [](const Tensor &self) { return vectorize(self.dims()); })
       .def("set_dims",
-           [](pd::Tensor& self, const std::vector<int>& dim) {
-             self.Resize(pd::make_ddim(dim));
+           [](Tensor &self, const std::vector<int> &dim) {
+             self.Resize(make_ddim(dim));
           })
       .def("alloc_float",
-           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
+           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
       .def("alloc_float",
-           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
+           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
       .def("alloc_int",
-           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
+           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
       .def("alloc_int",
-           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
+           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
-      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
+      .def("set", PyCPUTensorSetFromArray<float>)
+      .def("set", PyCPUTensorSetFromArray<int>)
 #ifndef PADDLE_ONLY_CPU
-      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
-      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
+      .def("set", PyCUDATensorSetFromArray<float>)
+      .def("set", PyCUDATensorSetFromArray<int>)
 #endif
-      .def("shape",
-           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });
+      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });

-  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.
+  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

 All parameter, weight, gradient are variables in Paddle.
 )DOC")
-      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
+      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
       .def("set_int",
-           [](pd::Variable& var, int val) -> void {
-             *var.GetMutable<int>() = val;
-           })
-      .def("get_int",
-           [](const pd::Variable& var) -> int { return var.Get<int>(); })
+           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
+      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
       .def("get_tensor",
-           [](pd::Variable& self) -> pd::Tensor* {
-             return self.GetMutable<pd::Tensor>();
-           },
+           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
            py::return_value_policy::reference)
       .def("get_net",
-           [](pd::Variable& self) -> pd::NetOp* {
-             return self.GetMutable<pd::NetOp>();
-           },
+           [](Variable &self) -> NetOp * { return self.GetMutable<NetOp>(); },
            py::return_value_policy::reference);

-  py::class_<pd::Scope>(m, "Scope", "")
+  py::class_<Scope>(m, "Scope", "")
       .def("new_var",
-           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
+           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
-      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
+      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
       .def(py::init<>())
-      .def("new_scope",
-           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
+      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
-      .def("drop_kids", &pd::Scope::DropKids);
+      .def("drop_kids", &Scope::DropKids);

   //! @note: Be careful! PyBind will return std::string as an unicode, not
   //! Python str. If you want a str object, you should cast them in Python.
   m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
-    auto& protos = pd::OpRegistry::protos();
+    auto &protos = OpRegistry::protos();
     std::vector<py::bytes> ret_values;
     for (auto it = protos.begin(); it != protos.end(); ++it) {
       PADDLE_ENFORCE(it->second.IsInitialized(),
@@ -161,8 +151,8 @@ All parameter, weight, gradient are variables in Paddle.
   m.def_submodule(
        "var_names",
        "The module will return special predefined variable name in Paddle")
-      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
-      .def("temp", pd::OperatorBase::TMP_VAR_NAME);
+      .def("empty", OperatorBase::EMPTY_VAR_NAME)
+      .def("temp", OperatorBase::TMP_VAR_NAME);
   // clang-format off
   py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
       .def_static("create",
@@ -185,43 +175,43 @@ All parameter, weight, gradient are variables in Paddle.

   py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

-  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
+  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
       m, "Operator");

   operator_base.def_static("create", [](py::bytes protobin) {
-    pd::OpDesc desc;
+    OpDesc desc;
     PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                    "Cannot parse user input to OpDesc");
     PADDLE_ENFORCE(desc.IsInitialized(),
                    "User OpDesc is not initialized, reason %s",
                    desc.InitializationErrorString());
-    return pd::OpRegistry::CreateOp(desc);
+    return OpRegistry::CreateOp(desc);
   });

   operator_base.def("backward",
-                    [](const pd::OperatorBase& forwardOp,
-                       const std::unordered_set<std::string>& no_grad_vars) {
-                      return pd::Backward(forwardOp, no_grad_vars);
+                    [](const OperatorBase &forwardOp,
+                       const std::unordered_set<std::string> &no_grad_vars) {
+                      return Backward(forwardOp, no_grad_vars);
                     });

   ExposeOperator(operator_base);

-  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");
+  py::class_<NetOp, std::shared_ptr<NetOp>> net(m, "Net");

   net.def_static("create",
-                 []() -> std::shared_ptr<pd::NetOp> {
-                   auto retv = std::make_shared<pd::NetOp>();
+                 []() -> std::shared_ptr<NetOp> {
+                   auto retv = std::make_shared<NetOp>();
                    retv->type_ = "plain_net";
                    return retv;
                  })
-      .def("add_op", &pd::NetOp::AddOp)
+      .def("add_op", &NetOp::AddOp)
       .def("add_op",
-           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
-             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
+           [](NetOp &self, const std::shared_ptr<NetOp> &net) -> void {
+             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
            })
-      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
+      .def("complete_add_op", &NetOp::CompleteAddOp)
       .def("complete_add_op",
-           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
+           [](std::shared_ptr<NetOp> &self) { self->CompleteAddOp(); });
   ExposeOperator(net);

   m.def("unique_integer", UniqueIntegerGenerator);
@@ -230,3 +220,5 @@ All parameter, weight, gradient are variables in Paddle.

   return m.ptr();
 }
+}  // namespace framework
+}  // namespace paddle
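
As a quick sanity check of the bindings above, here is a minimal Python sketch against the resulting core extension module. It exercises only methods bound in this diff; the import path (paddle.v2.framework.core) and the (array, place) argument order of the "set" binding are assumptions about the build layout and the tensor_py.h helpers, not guarantees from this change.

    import numpy as np
    import paddle.v2.framework.core as core  # assumed import path for the built module

    scope = core.Scope()
    var = scope.new_var("x")      # Scope::NewVar, returned by reference
    tensor = var.get_tensor()     # Variable::GetMutable<Tensor>()

    place = core.CPUPlace()
    tensor.set_dims([2, 3])       # Tensor::Resize(make_ddim({2, 3}))
    tensor.alloc_float(place)     # Tensor::mutable_data<float>(CPUPlace)
    tensor.set(np.ones((2, 3), dtype=np.float32), place)  # assumed (array, place) order

    print(tensor.shape())         # [2, 3], via vectorize(tensor.dims())
    print(np.array(tensor))       # buffer protocol view through CastToPyBuffer

Note that new_var, get_tensor, and the other accessors are bound with py::return_value_policy::reference, so the Scope owns the Variable and Tensor; keeping the scope object alive for the lifetime of the tensor is the caller's responsibility.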