|
| 1 | +# Copyright (c) MONAI Consortium |
| 2 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 3 | +# you may not use this file except in compliance with the License. |
| 4 | +# You may obtain a copy of the License at |
| 5 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 6 | +# Unless required by applicable law or agreed to in writing, software |
| 7 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 8 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 9 | +# See the License for the specific language governing permissions and |
| 10 | +# limitations under the License. |
| 11 | + |
| 12 | +import unittest |
| 13 | + |
| 14 | +import torch |
| 15 | +import torch.distributed as dist |
| 16 | +from torch.cuda.amp import autocast |
| 17 | + |
| 18 | +# FIXME: test for the workaround of https://github.com/Project-MONAI/MONAI/issues/5291 |
| 19 | +from monai.config.deviceconfig import print_config |
| 20 | +from tests.utils import skip_if_no_cuda |
| 21 | + |
| 22 | + |
def main_worker(rank, ngpus_per_node):
    """Run a minimal DDP forward pass on one GPU to exercise the cudnn-benchmark workaround.

    Spawned once per GPU by ``torch.multiprocessing.spawn``; each process joins an
    NCCL process group, builds a tiny Conv3d wrapped in DistributedDataParallel,
    and runs a single autocast forward pass.

    Args:
        rank: process rank; also used as the CUDA device index for this worker.
        ngpus_per_node: total number of spawned processes (the DDP world size).
    """
    dist.init_process_group(backend="nccl", init_method="tcp://127.0.0.1:12345", world_size=ngpus_per_node, rank=rank)
    # `benchmark = True` is not compatible with openCV in PyTorch 22.09 docker for multi-gpu training
    torch.backends.cudnn.benchmark = True

    model = torch.nn.Conv3d(in_channels=1, out_channels=32, kernel_size=3, bias=True).to(rank)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[rank], output_device=rank, find_unused_parameters=False
    )
    x = torch.ones(1, 1, 192, 192, 192).to(rank)
    try:
        with autocast(enabled=True):
            model(x)
    finally:
        # FIX: the process group was never torn down, leaking NCCL resources and
        # risking hangs when the spawned worker exits; always destroy it.
        dist.destroy_process_group()
| 35 | + |
| 36 | + |
@skip_if_no_cuda
class TestCV2Dist(unittest.TestCase):
    """Multi-GPU smoke test for the cv2/cudnn-benchmark workaround (MONAI issue #5291)."""

    def test_cv2_cuda_ops(self):
        # Dump environment/library versions first so CI logs show the exact setup.
        print_config()
        world_size = torch.cuda.device_count()
        # One worker process per visible GPU; each receives (rank, world_size).
        torch.multiprocessing.spawn(main_worker, nprocs=world_size, args=(world_size,))
| 43 | + |
| 44 | + |
# Allow running this test module directly, e.g. `python -m tests.test_cv2_dist`.
if __name__ == "__main__":
    unittest.main()