
Commit 5f49a20

move meshgrid yaml

1 parent: cce176b

File tree: 8 files changed, +227 −1 lines changed

paddle/phi/api/lib/api_custom_impl.cc

Lines changed: 148 additions & 0 deletions

@@ -410,5 +410,153 @@ std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
  return x_grad;
}

std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(inputs);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "meshgrid", {kernel_backend, kernel_layout, kernel_data_type});
  VLOG(6) << "meshgrid API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "meshgrid API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {});
  std::vector<const phi::DenseTensor*> input_inputs(input_inputs_vec->size());
  for (size_t i = 0; i < input_inputs.size(); ++i) {
    input_inputs[i] = &input_inputs_vec->at(i);
  }

  auto x_meta_vec = MakeMetaTensor(input_inputs);
  std::vector<phi::MetaTensor*> inputs_metas(x_meta_vec.size());
  for (size_t i = 0; i < x_meta_vec.size(); ++i) {
    inputs_metas[i] = &x_meta_vec[i];
  }

  // Calculate the number of out tensors
  size_t out_number = inputs.size();

  std::vector<Tensor> out;
  auto dense_outs = SetKernelOutput(out_number, kernel_backend, &out);

  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(dense_outs[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }
  phi::MeshgridInferMeta(inputs_metas, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, input_inputs, dense_outs);

  return out;
}

std::vector<Tensor> meshgrid_grad_impl(
    const std::vector<Tensor>& inputs,
    const std::vector<Tensor>& outputs_grad) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(inputs, outputs_grad);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "meshgrid_grad", {kernel_backend, kernel_layout, kernel_data_type});
  VLOG(6) << "meshgrid_grad API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "meshgrid_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {});
  std::vector<const phi::DenseTensor*> input_inputs(input_inputs_vec->size());
  for (size_t i = 0; i < input_inputs.size(); ++i) {
    input_inputs[i] = &input_inputs_vec->at(i);
  }
  auto input_outputs_grad_vec =
      PrepareData(outputs_grad, kernel.InputAt(1), {});
  std::vector<const phi::DenseTensor*> input_outputs_grad(
      input_outputs_grad_vec->size());
  for (size_t i = 0; i < input_outputs_grad.size(); ++i) {
    input_outputs_grad[i] = &input_outputs_grad_vec->at(i);
  }

  size_t out_number = inputs.size();
  std::vector<Tensor> api_output;
  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);

  auto inputs_meta_vec = MakeMetaTensor(input_inputs);
  std::vector<phi::MetaTensor*> inputs_metas(inputs_meta_vec.size());
  for (size_t i = 0; i < inputs_meta_vec.size(); ++i) {
    inputs_metas[i] = &inputs_meta_vec[i];
  }

  auto outputs_grad_meta_vec = MakeMetaTensor(input_outputs_grad);
  std::vector<phi::MetaTensor*> outputs_grad_metas(
      outputs_grad_meta_vec.size());
  for (size_t i = 0; i < outputs_grad_meta_vec.size(); ++i) {
    outputs_grad_metas[i] = &outputs_grad_meta_vec[i];
  }

  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(kernel_out[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }

  phi::MeshgridGradInferMeta(inputs_metas, outputs_grad_metas, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, input_inputs, input_outputs_grad, kernel_out);

  return api_output;
}

}  // namespace experimental
}  // namespace paddle
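As an aside for reviewers, here is a minimal NumPy sketch of the semantics the meshgrid kernel dispatched above computes (an illustration under the assumption of 1-D inputs, not Paddle's actual kernel code): given N inputs, it produces N outputs whose common shape is the tuple of input lengths, with input i laid out along axis i.

    # Reference sketch of meshgrid semantics (illustration only, not phi kernel code).
    import numpy as np

    def meshgrid_ref(*arrays):
        # Output shape is (len(a0), len(a1), ..., len(aN-1)).
        shape = tuple(a.size for a in arrays)
        outs = []
        for i, a in enumerate(arrays):
            # Place input i along axis i, then broadcast to the full shape.
            view = [1] * len(arrays)
            view[i] = a.size
            outs.append(np.broadcast_to(a.reshape(view), shape).copy())
        return outs

    xs, ys = meshgrid_ref(np.arange(3), np.arange(4))
    assert xs.shape == (3, 4) and ys.shape == (3, 4)

This matches np.meshgrid(..., indexing='ij'), which is the indexing convention paddle's meshgrid follows.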

paddle/phi/api/lib/api_custom_impl.h

Lines changed: 3 additions & 0 deletions

@@ -59,6 +59,9 @@ std::vector<Tensor> concat_grad_impl(const std::vector<Tensor>& x,
std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                    const Tensor& out_grad,
                                    int axis);
std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
std::vector<Tensor> meshgrid_grad_impl(const std::vector<Tensor>& inputs,
                                       const std::vector<Tensor>& outputs_grad);

}  // namespace experimental
}  // namespace paddle

paddle/phi/infermeta/backward.cc

Lines changed: 14 additions & 0 deletions

@@ -245,6 +245,20 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
  dx->share_meta(x);
}

void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                           const std::vector<MetaTensor*>& outputs_grad,
                           std::vector<MetaTensor*> inputs_grad) {
  PADDLE_ENFORCE_GT(outputs_grad.size(),
                    1,
                    errors::InvalidArgument(
                        "Number of Inputs(Out@Grad) should be larger than 1. "
                        "But received Inputs(Out@Grad)'s size = %d.",
                        outputs_grad.size()));
  // Each input's gradient has the same shape and dtype as the input itself.
  for (size_t i = 0; i < inputs.size(); i++) {
    inputs_grad[i]->share_meta(*inputs[i]);
  }
}

void NllLossGradInferMeta(const MetaTensor& x,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
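The share_meta loop above encodes a simple invariant: each input gradient matches its input's shape and dtype. A quick hedged check from the Python side (assuming the public paddle.meshgrid API, which routes into these kernels):

    import paddle

    x = paddle.arange(3, dtype='float32')
    y = paddle.arange(4, dtype='float32')
    x.stop_gradient = False
    y.stop_gradient = False
    gx, gy = paddle.meshgrid(x, y)   # both outputs have shape [3, 4]
    (gx + gy).sum().backward()
    print(x.grad.shape)  # [3] -- matches x, as share_meta dictates
    print(y.grad.shape)  # [4] -- matches y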

paddle/phi/infermeta/backward.h

Lines changed: 4 additions & 0 deletions

@@ -115,6 +115,10 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
                                   bool adaptive,
                                   MetaTensor* dx);

void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                           const std::vector<MetaTensor*>& outputs_grad,
                           std::vector<MetaTensor*> inputs_grad);

void NllLossGradInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,

python/paddle/fluid/tests/unittests/test_meshgrid_op.py

Lines changed: 43 additions & 0 deletions

@@ -20,6 +20,7 @@
import paddle.fluid as fluid
import paddle
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard


class TestMeshgridOp(OpTest):

@@ -149,6 +150,10 @@ def test_api_with_dygraph(self):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph()


class TestMeshgridOp7(unittest.TestCase):
    def test_api_with_dygraph_list_input(self):

@@ -163,6 +168,10 @@ def test_api_with_dygraph_list_input(self):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph_list_input()


class TestMeshgridOp8(unittest.TestCase):
    def test_api_with_dygraph_tuple_input(self):

@@ -177,6 +186,40 @@ def test_api_with_dygraph_tuple_input(self):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph_tuple_input()


class TestMeshgridEager(unittest.TestCase):
    def test_dygraph_final_state_api(self):
        input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
        input_2 = np.random.randint(0, 100, [200, ]).astype('int32')

        with fluid.dygraph.guard():
            tensor_1 = fluid.dygraph.to_variable(input_1)
            tensor_2 = fluid.dygraph.to_variable(input_2)
            tensor_1.stop_gradient = False
            tensor_2.stop_gradient = False
            res_1, res_2 = paddle.tensor.meshgrid((tensor_1, tensor_2))
            sum = paddle.add_n([res_1, res_2])
            sum.backward()
            with _test_eager_guard():
                tensor_eager_1 = fluid.dygraph.to_variable(input_1)
                tensor_eager_2 = fluid.dygraph.to_variable(input_2)
                tensor_eager_1.stop_gradient = False
                tensor_eager_2.stop_gradient = False
                res_eager_1, res_eager_2 = paddle.tensor.meshgrid(
                    (tensor_eager_1, tensor_eager_2))
                sum_eager = paddle.add_n([res_eager_1, res_eager_2])
                sum_eager.backward()
                self.assertEqual(
                    (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()
                     ).all(), True)
                self.assertEqual(
                    (tensor_2.grad.numpy() == tensor_eager_2.grad.numpy()
                     ).all(), True)


if __name__ == '__main__':
    paddle.enable_static()

python/paddle/tensor/creation.py

Lines changed: 3 additions & 1 deletion

@@ -776,10 +776,12 @@ def meshgrid(*args, **kwargs):

     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         num = len(args)
         out = _C_ops.meshgrid(list(args), num)
         return out
+    if in_dygraph_mode():
+        return _C_ops.final_state_meshgrid(list(args))

     name = kwargs.get("name", None)
     helper = LayerHelper('meshgrid', **locals())
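A short usage sketch of the two calling conventions the branch above normalizes (a single list or tuple argument versus varargs); both reach the same dygraph fast path:

    import paddle

    x = paddle.randint(0, 100, [100])
    y = paddle.randint(0, 100, [200])

    a1, b1 = paddle.meshgrid(x, y)     # varargs form
    a2, b2 = paddle.meshgrid([x, y])   # list form; unpacked by the isinstance branch
    print(a1.shape)  # [100, 200]
    print(b2.shape)  # [100, 200]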

python/paddle/utils/code_gen/api.yaml

Lines changed: 6 additions & 0 deletions

@@ -1120,6 +1120,12 @@
  func : mean
  backward : mean_grad

- api : meshgrid
  args : (Tensor[] inputs)
  output : Tensor[]
  invoke : meshgrid_impl(inputs)
  backward : meshgrid_grad

- api : min
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)

python/paddle/utils/code_gen/backward.yaml

Lines changed: 6 additions & 0 deletions

@@ -777,6 +777,12 @@
  kernel :
    func : mean_grad

- backward_api : meshgrid_grad
  forward : meshgrid (Tensor[] inputs) -> Tensor[](outputs)
  args : (Tensor[] inputs, Tensor[] outputs_grad)
  output : Tensor[](inputs_grad)
  invoke : meshgrid_grad_impl(inputs, outputs_grad)

- backward_api : min_grad
  forward: min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
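For intuition about what meshgrid_grad must do with outputs_grad: output i is input i broadcast along every other axis, so the gradient w.r.t. input i is the corresponding output gradient summed over all axes except axis i. A minimal NumPy sketch of that reduction (illustration only, not the kernel itself):

    import numpy as np

    x = np.arange(3.0)
    gx_out = np.ones((3, 4))        # upstream grad for output 0, shape (3, 4)
    gx_in = gx_out.sum(axis=1)      # reduce over every axis except x's axis 0
    assert gx_in.shape == x.shape   # grad shape matches the input shape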
