176 changes: 92 additions & 84 deletions paddle/fluid/pybind/place.cc
@@ -185,14 +185,94 @@ static inline int PlaceIndex(const PlaceType &p) { // NOLINT

template <typename PlaceType1, typename PlaceType2>
static inline bool IsSamePlace(const PlaceType1 &p1, const PlaceType2 &p2) {
return phi::Place(p1) == phi::Place(p2);
if (std::is_same_v<PlaceType1, phi::Place> ||
std::is_same_v<PlaceType2, phi::Place> ||
std::is_same_v<PlaceType1, PlaceType2>) {
return phi::Place(p1) == phi::Place(p2);
}
return false;
}

void BindPlace(pybind11::module &m) { // NOLINT
using namespace paddle::framework; // NOLINT
py::class_<phi::CustomPlace> customplace(m,
"CustomPlace",
R"DOC(

py::class_<phi::Place> platformplace(m, "Place");
g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
platformplace.def(py::init<>())
.def("_type", &PlaceIndex<phi::Place>)
.def("_equals", &IsSamePlace<phi::Place, phi::Place>)
.def("_equals", &IsSamePlace<phi::Place, phi::GPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::CPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::XPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::IPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::GPUPinnedPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::XPUPinnedPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::CustomPlace>)
.def("__eq__",
[](const py::object &self, const py::object &other) -> bool {
if (py::isinstance<phi::Place>(other)) {
return self.attr("_equals")(other).cast<bool>();
}
return false;
})
.def("__hash__",
[](const phi::Place &self) { return phi::Place::Hash()(self); })
.def("is_gpu_place",
[](phi::Place &self) { return phi::is_gpu_place(self); })
.def("is_cpu_place",
[](phi::Place &self) { return phi::is_cpu_place(self); })
.def("is_xpu_place",
[](phi::Place &self) { return phi::is_xpu_place(self); })
.def("is_ipu_place",
[](phi::Place &self) { return phi::is_ipu_place(self); })
.def("is_cuda_pinned_place",
[](phi::Place &self) { return phi::is_cuda_pinned_place(self); })
.def("is_xpu_pinned_place",
[](phi::Place &self) { return phi::is_xpu_pinned_place(self); })
.def("is_custom_place",
[](phi::Place &self) { return phi::is_custom_place(self); })
.def("gpu_device_id", [](phi::Place &self) { return self.device; })
.def("xpu_device_id", [](phi::Place &self) { return self.device; })
.def("ipu_device_id", [](phi::Place &self) { return self.device; })
.def("custom_device_id", [](phi::Place &self) { return self.device; })
.def("custom_device_type",
[](phi::Place &self) { return self.GetDeviceType(); })
.def("set_place",
[](phi::Place &self, const phi::Place &other) { self = other; })
.def("set_place",
[](phi::Place &self, const phi::CPUPlace &cpu_place) {
self = cpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::XPUPlace &xpu_place) {
self = xpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::GPUPlace &gpu_place) {
self = gpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::GPUPinnedPlace &cuda_pinned_place) {
self = cuda_pinned_place;
})
.def("set_place",
[](phi::Place &self, const phi::XPUPinnedPlace &xpu_pinned_place) {
self = xpu_pinned_place;
})
.def("set_place",
[](phi::Place &self, const phi::IPUPlace &ipu_place) {
self = ipu_place;
})
.def("set_place",
[](phi::Place &self, const phi::CustomPlace &plug_place) {
self = plug_place;
})
.def("__repr__", string::to_string<const phi::Place &>)
.def("__str__", string::to_string<const phi::Place &>);

py::class_<phi::CustomPlace, phi::Place> customplace(m,
"CustomPlace",
R"DOC(
CustomPlace is a descriptor of a device.
It represents a custom device on which a tensor will be allocated and a model will run.

@@ -280,7 +360,7 @@ void BindPlace(pybind11::module &m) { // NOLINT
[](const phi::CustomPlace &self) { return self.GetDeviceType(); })
.def("__repr__", string::to_string<const phi::CustomPlace &>)
.def("__str__", string::to_string<const phi::CustomPlace &>);
py::class_<phi::GPUPlace> cudaplace(m, "CUDAPlace", R"DOC(
py::class_<phi::GPUPlace, phi::Place> cudaplace(m, "CUDAPlace", R"DOC(

CUDAPlace is a descriptor of a device.
It represents a GPU device on which a Tensor is or will be allocated.
@@ -384,7 +464,7 @@ void BindPlace(pybind11::module &m) { // NOLINT
#endif
});
#endif
py::class_<phi::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
py::class_<phi::XPUPlace, phi::Place> xpuplace(m, "XPUPlace", R"DOC(
Return a Baidu Kunlun Place

Examples:
@@ -490,7 +570,7 @@ void BindPlace(pybind11::module &m) { // NOLINT
});
#endif

py::class_<phi::CPUPlace> cpuplace(m, "CPUPlace", R"DOC(
py::class_<phi::CPUPlace, phi::Place> cpuplace(m, "CPUPlace", R"DOC(
CPUPlace is a descriptor of a device.
It represents a CPU device on which a tensor will be allocated and a model will run.

@@ -524,7 +604,8 @@ void BindPlace(pybind11::module &m) { // NOLINT
return false;
#endif
});
py::class_<phi::GPUPinnedPlace> cudapinnedplace(m, "CUDAPinnedPlace", R"DOC(
py::class_<phi::GPUPinnedPlace, phi::Place> cudapinnedplace(
m, "CUDAPinnedPlace", R"DOC(
CUDAPinnedPlace is a descriptor of a device.
It refers to the page-locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
The host operating system will not page out or swap this memory.
@@ -562,7 +643,8 @@ void BindPlace(pybind11::module &m) { // NOLINT
.def("__str__", string::to_string<const phi::GPUPinnedPlace &>);

// XPUPinnedPlace
py::class_<phi::XPUPinnedPlace> xpupinnedplace(m, "XPUPinnedPlace", R"DOC(
py::class_<phi::XPUPinnedPlace, phi::Place> xpupinnedplace(
m, "XPUPinnedPlace", R"DOC(
XPUPinnedPlace is a descriptor of a device.
It refers to the page-locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
The host operating system will not page out or swap this memory.
@@ -600,7 +682,7 @@ void BindPlace(pybind11::module &m) { // NOLINT
.def("__str__", string::to_string<const phi::XPUPinnedPlace &>);

// IPUPlace
py::class_<phi::IPUPlace> ipuplace(m, "IPUPlace", R"DOC(
py::class_<phi::IPUPlace, phi::Place> ipuplace(m, "IPUPlace", R"DOC(
IPUPlace is a descriptor of a device.
It represents an IPU device on which a tensor will be allocated and a model will run.

@@ -648,80 +730,6 @@ void BindPlace(pybind11::module &m) { // NOLINT
.def("_equals", &IsSamePlace<phi::IPUPlace, phi::GPUPinnedPlace>)
.def("_equals", &IsSamePlace<phi::IPUPlace, phi::XPUPinnedPlace>)
.def("__str__", string::to_string<const phi::IPUPlace &>);

py::class_<phi::Place> platformplace(m, "Place");
g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
platformplace.def(py::init<>())
.def("_type", &PlaceIndex<phi::Place>)
.def("_equals", &IsSamePlace<phi::Place, phi::Place>)
.def("_equals", &IsSamePlace<phi::Place, phi::GPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::CPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::XPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::IPUPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::GPUPinnedPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::XPUPinnedPlace>)
.def("_equals", &IsSamePlace<phi::Place, phi::CustomPlace>)
.def("__eq__",
[](const py::object &self, const py::object &other) -> bool {
if (py::isinstance<phi::Place>(other)) {
return self.attr("_equals")(other).cast<bool>();
}
return false;
})
.def("__hash__",
[](const phi::Place &self) { return phi::Place::Hash()(self); })
.def("is_gpu_place",
[](phi::Place &self) { return phi::is_gpu_place(self); })
.def("is_cpu_place",
[](phi::Place &self) { return phi::is_cpu_place(self); })
.def("is_xpu_place",
[](phi::Place &self) { return phi::is_xpu_place(self); })
.def("is_ipu_place",
[](phi::Place &self) { return phi::is_ipu_place(self); })
.def("is_cuda_pinned_place",
[](phi::Place &self) { return phi::is_cuda_pinned_place(self); })
.def("is_xpu_pinned_place",
[](phi::Place &self) { return phi::is_xpu_pinned_place(self); })
.def("is_custom_place",
[](phi::Place &self) { return phi::is_custom_place(self); })
.def("gpu_device_id", [](phi::Place &self) { return self.device; })
.def("xpu_device_id", [](phi::Place &self) { return self.device; })
.def("ipu_device_id", [](phi::Place &self) { return self.device; })
.def("custom_device_id", [](phi::Place &self) { return self.device; })
.def("custom_device_type",
[](phi::Place &self) { return self.GetDeviceType(); })
.def("set_place",
[](phi::Place &self, const phi::Place &other) { self = other; })
.def("set_place",
[](phi::Place &self, const phi::CPUPlace &cpu_place) {
self = cpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::XPUPlace &xpu_place) {
self = xpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::GPUPlace &gpu_place) {
self = gpu_place;
})
.def("set_place",
[](phi::Place &self, const phi::GPUPinnedPlace &cuda_pinned_place) {
self = cuda_pinned_place;
})
.def("set_place",
[](phi::Place &self, const phi::XPUPinnedPlace &xpu_pinned_place) {
self = xpu_pinned_place;
})
.def("set_place",
[](phi::Place &self, const phi::IPUPlace &ipu_place) {
self = ipu_place;
})
.def("set_place",
[](phi::Place &self, const phi::CustomPlace &plug_place) {
self = plug_place;
})
.def("__repr__", string::to_string<const phi::Place &>)
.def("__str__", string::to_string<const phi::Place &>);
}

} // namespace paddle::pybind
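For orientation, here is a minimal Python sketch of the comparison behaviour the reordered bindings and the stricter `IsSamePlace` template aim to provide. It mirrors the new `test_pybind_place.py` added below; the cross-type check at the end is illustrative and assumes a CUDA build.

```python
import paddle

# A concrete place and the generic core.Place carried by a tensor.
cpu_place = paddle.CPUPlace()
tensor_place = paddle.randn([2, 2]).to(device="cpu").place

# Generic Place vs. concrete place is resolved by the actual device,
# so equality holds in both directions.
assert cpu_place == tensor_place
assert tensor_place == cpu_place

# Two different concrete place types should never compare equal, even if
# their device ids coincide: IsSamePlace now short-circuits to false.
if paddle.device.is_compiled_with_cuda():
    assert paddle.CUDAPlace(0) != paddle.CPUPlace()
```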
5 changes: 3 additions & 2 deletions python/paddle/base/framework.py
@@ -809,8 +809,9 @@ def _dygraph_tracer():

def _current_expected_place_():
global _global_expected_place_
if _global_expected_place_ is None or isinstance(
_global_expected_place_, core.Place
if (
_global_expected_place_ is None
or type(_global_expected_place_) is core.Place
):
if core.is_compiled_with_cuda():
try:
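The `isinstance(..., core.Place)` check was replaced with an exact type test because the concrete place classes are now exposed as Python subclasses of `core.Place`, so `isinstance` can no longer tell an already-specific expected place apart from the bare generic one. A minimal sketch of that distinction, assuming the usual `paddle.base.core` module layout:

```python
import paddle
from paddle.base import core

cpu_place = paddle.CPUPlace()
generic_place = core.Place()

# Both values now pass an isinstance() check against core.Place ...
assert isinstance(cpu_place, core.Place)
assert isinstance(generic_place, core.Place)

# ... but only the bare, not-yet-assigned place matches the exact-type
# test used by _current_expected_place_().
assert type(generic_place) is core.Place
assert type(cpu_place) is not core.Place
```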
@@ -62,7 +62,7 @@
tensor_dim,
)
from .dispatcher import Dispatcher, optional
from .tracker import ConstTracker, DanglingTracker, DummyTracker
from .tracker import ConstTracker, DanglingTracker, DummyTracker, GetAttrTracker
from .variables import (
BuiltinVariable,
CallableVariable,
@@ -252,6 +252,15 @@ def inner(*args, **kwargs):
var.get_py_type(), graph=var.graph, tracker=DummyTracker([var])
),
)
Dispatcher.register(
type,
("VariableBase",),
lambda var: VariableFactory.from_value(
type(var.get_py_value()),
graph=var.graph,
tracker=GetAttrTracker(var, "__class__"),
),
)

# dict
Dispatcher.register(
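The new `Dispatcher.register` entry makes a simulated `type(x)` call on a traced `VariableBase` yield a variable built from the underlying Python value's class and tracked as an access to `x.__class__` via `GetAttrTracker`. A plain-Python illustration of why that tracking is sound for ordinary instances (this is not SOT internals; `Widget` is a hypothetical class):

```python
class Widget:
    """Hypothetical example class."""

# For ordinary instances, type(x) and x.__class__ name the same class
# object, so recording the result of type() as a "__class__" attribute
# access preserves the guard/tracking chain.
w = Widget()
assert type(w) is w.__class__
assert w.__class__ is Widget
```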
110 changes: 110 additions & 0 deletions test/legacy_test/test_pybind_place.py
@@ -0,0 +1,110 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle


class TestPybindPlace(unittest.TestCase):
def test_cpu_place(self):
pybind_place = paddle.CPUPlace()
self.assertEqual(pybind_place, pybind_place)
tensor_place = paddle.randn([2, 2]).to(device="cpu").place
self.assertEqual(pybind_place, tensor_place)
self.assertEqual(tensor_place, pybind_place)
self.assertEqual(tensor_place, tensor_place)

tensor_place_2 = paddle.randn([2, 2]).to(device="cpu").place
self.assertEqual(tensor_place_2, tensor_place)
self.assertEqual(tensor_place, tensor_place_2)

pybind_place_2 = paddle.CPUPlace()
self.assertEqual(pybind_place, pybind_place_2)

def test_cuda_place(self):
if paddle.device.is_compiled_with_cuda():
pybind_place = paddle.CUDAPlace(0)
self.assertEqual(pybind_place, pybind_place)
tensor_place = paddle.randn([2, 2]).place
self.assertEqual(pybind_place, tensor_place)
self.assertEqual(tensor_place, pybind_place)
self.assertEqual(tensor_place, tensor_place)

tensor_place_2 = paddle.randn([2, 2]).place
self.assertEqual(tensor_place_2, tensor_place)
self.assertEqual(tensor_place, tensor_place_2)

pybind_place_2 = paddle.CUDAPlace(0)
self.assertEqual(pybind_place, pybind_place_2)
else:
self.skipTest("Skip as paddle is not compiled with cuda")

def test_xpu_place(self):
if paddle.device.is_compiled_with_xpu():
pybind_place = paddle.XPUPlace(0)
self.assertEqual(pybind_place, pybind_place)
tensor_place = paddle.randn([2, 2]).place
self.assertEqual(pybind_place, tensor_place)
self.assertEqual(tensor_place, pybind_place)
self.assertEqual(tensor_place, tensor_place)

tensor_place_2 = paddle.randn([2, 2]).place
self.assertEqual(tensor_place_2, tensor_place)
self.assertEqual(tensor_place, tensor_place_2)

pybind_place_2 = paddle.XPUPlace(0)
self.assertEqual(pybind_place, pybind_place_2)
else:
self.skipTest("Skip as paddle is not compiled with xpu")

def test_custom_place(self):
if paddle.device.is_compiled_with_custom_device("FakeCPU"):
pybind_place = paddle.CustomPlace("FakeCPU", 0)
self.assertEqual(pybind_place, pybind_place)
tensor_place = paddle.randn([2, 2]).place
self.assertEqual(pybind_place, tensor_place)
self.assertEqual(tensor_place, pybind_place)
self.assertEqual(tensor_place, tensor_place)

tensor_place_2 = paddle.randn([2, 2]).place
self.assertEqual(tensor_place_2, tensor_place)
self.assertEqual(tensor_place, tensor_place_2)

pybind_place_2 = paddle.CustomPlace("FakeCPU", 0)
self.assertEqual(pybind_place, pybind_place_2)
else:
self.skipTest("Skip as paddle is not compiled with custom device")

def test_ipu_place(self):
if paddle.device.is_compiled_with_ipu():
pybind_place = paddle.IPUPlace()
self.assertEqual(pybind_place, pybind_place)
tensor_place = paddle.randn([2, 2]).place
self.assertEqual(pybind_place, tensor_place)
self.assertEqual(tensor_place, pybind_place)
self.assertEqual(tensor_place, tensor_place)

tensor_place_2 = paddle.randn([2, 2]).place
self.assertEqual(tensor_place_2, tensor_place)
self.assertEqual(tensor_place, tensor_place_2)

pybind_place_2 = paddle.IPUPlace()
self.assertEqual(pybind_place, pybind_place_2)
else:
self.skipTest("Skip as paddle is not compiled with ipu")


if __name__ == '__main__':
unittest.main()