
Commit 2288e1f

Merge pull request #329 from pyiron/max_workers
Add max_workers parameter for backwards compatibility
2 parents: 0e758f1 + 6172507

4 files changed: +29, -7 lines


pympipool/__init__.py

Lines changed: 10 additions & 0 deletions
@@ -19,6 +19,9 @@ class Executor:
         an interactive Jupyter notebook.

     Args:
+        max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
+                           cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
+                           recommended, as computers have a limited number of compute cores.
         max_cores (int): defines the number cores which can be used in parallel
         cores_per_worker (int): number of MPI cores to be used for each function call
         threads_per_core (int): number of OpenMP threads to be used for each function call
@@ -64,6 +67,7 @@ class Executor:

     def __init__(
         self,
+        max_workers: int = 1,
         max_cores: int = 1,
         cores_per_worker: int = 1,
         threads_per_core: int = 1,
@@ -84,6 +88,7 @@ def __init__(

     def __new__(
         cls,
+        max_workers: int = 1,
         max_cores: int = 1,
         cores_per_worker: int = 1,
         threads_per_core: int = 1,
@@ -108,6 +113,9 @@ def __new__(
            requires the SLURM workload manager to be installed on the system.

        Args:
+           max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
+                              number of cores which can be used in parallel - just like the max_cores parameter. Using
+                              max_cores is recommended, as computers have a limited number of compute cores.
            max_cores (int): defines the number cores which can be used in parallel
            cores_per_worker (int): number of MPI cores to be used for each function call
            threads_per_core (int): number of OpenMP threads to be used for each function call
@@ -135,6 +143,7 @@ def __new__(
        """
        if not disable_dependencies:
            return ExecutorWithDependencies(
+               max_workers=max_workers,
                max_cores=max_cores,
                cores_per_worker=cores_per_worker,
                threads_per_core=threads_per_core,
@@ -152,6 +161,7 @@ def __new__(
        else:
            _check_refresh_rate(refresh_rate=refresh_rate)
            return create_executor(
+               max_workers=max_workers,
                max_cores=max_cores,
                cores_per_worker=cores_per_worker,
                threads_per_core=threads_per_core,
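Taken together, these changes let Executor be constructed either way. A minimal usage sketch under the new signature (sum_args is a hypothetical user function; the backend and hostname_localhost values mirror the test at the bottom of this commit):

from pympipool import Executor

def sum_args(*args):
    return sum(args)

# Standard-library style, matching concurrent.futures.Executor:
with Executor(max_workers=2, backend="mpi", hostname_localhost=True) as exe:
    print(exe.submit(sum_args, 1, 2, 3).result())  # 6

# Recommended style, since compute cores are the limited resource:
with Executor(max_cores=2, backend="mpi", hostname_localhost=True) as exe:
    print(exe.submit(sum_args, 1, 2, 3).result())  # 6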

pympipool/scheduler/__init__.py

Lines changed: 11 additions & 6 deletions
@@ -14,6 +14,7 @@
     check_executor,
     check_backend,
     check_init_function,
+    validate_number_of_cores,
 )
 from pympipool.scheduler.slurm import (
     PySlurmExecutor,
@@ -36,6 +37,7 @@


 def create_executor(
+    max_workers: int = 1,
     max_cores: int = 1,
     cores_per_worker: int = 1,
     threads_per_core: int = 1,
@@ -58,19 +60,21 @@ def create_executor(
        requires the SLURM workload manager to be installed on the system.

     Args:
+        max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
+                           cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
+                           recommended, as computers have a limited number of compute cores.
         max_cores (int): defines the number cores which can be used in parallel
         cores_per_worker (int): number of MPI cores to be used for each function call
         threads_per_core (int): number of OpenMP threads to be used for each function call
         gpus_per_worker (int): number of GPUs per worker - defaults to 0
         oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
         cwd (str/None): current working directory where the parallel python task is executed
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
-                                      context of an HPC cluster this essential to be able to communicate to an
-                                      Executor running on a different compute node within the same allocation. And
-                                      in principle any computer should be able to resolve that their own hostname
-                                      points to the same address as localhost. Still MacOS >= 12 seems to disable
-                                      this look up for security reasons. So on MacOS it is required to set this
-                                      option to true
+                                      context of an HPC cluster this essential to be able to communicate to an Executor
+                                      running on a different compute node within the same allocation. And in principle
+                                      any computer should be able to resolve that their own hostname points to the same
+                                      address as localhost. Still MacOS >= 12 seems to disable this look up for security
+                                      reasons. So on MacOS it is required to set this option to true
         backend (str): Switch between the different backends "flux", "mpi" or "slurm". Alternatively, when "auto"
                        is selected (the default) the available backend is determined automatically.
         block_allocation (boolean): To accelerate the submission of a series of python functions with the same
@@ -81,6 +85,7 @@ def create_executor(
         command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)

     """
+    max_cores = validate_number_of_cores(max_cores=max_cores, max_workers=max_workers)
     check_init_function(block_allocation=block_allocation, init_function=init_function)
     check_backend(backend=backend)
     if backend == "flux" or (backend == "auto" and flux_installed):
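Because the validation runs before any backend check, a caller only ever has to reason about a single core budget. A hypothetical call combining the parameters documented above (values chosen purely for illustration):

from pympipool.scheduler import create_executor

exe = create_executor(
    max_workers=2,            # backwards-compatible alias for the core budget
    cores_per_worker=1,       # MPI ranks per function call
    threads_per_core=1,       # OpenMP threads per rank
    backend="auto",           # picks flux, slurm, or mpi, whichever is available
    hostname_localhost=True,  # required on MacOS >= 12, per the docstring note
)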

pympipool/shared/inputcheck.py

Lines changed: 7 additions & 0 deletions
@@ -78,3 +78,10 @@ def check_backend(backend):
 def check_init_function(block_allocation, init_function):
     if not block_allocation and init_function is not None:
         raise ValueError("")
+
+
+def validate_number_of_cores(max_cores, max_workers):
+    # only overwrite max_cores when it is set to 1
+    if max_workers != 1 and max_cores == 1:
+        return max_workers
+    return max_cores
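The precedence rule is easiest to read as a handful of cases; a minimal illustration (the import path follows the file above, and the asserted values follow directly from the two-line rule):

from pympipool.shared.inputcheck import validate_number_of_cores

# max_workers is only honored while max_cores still has its default of 1
assert validate_number_of_cores(max_cores=1, max_workers=4) == 4
# an explicitly set max_cores always wins
assert validate_number_of_cores(max_cores=8, max_workers=4) == 8
# both left at their defaults: a single core
assert validate_number_of_cores(max_cores=1, max_workers=1) == 1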

tests/test_executor_backend_mpi.py

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ def test_meta_executor_single(self):

     def test_meta_executor_parallel(self):
         with Executor(
-            max_cores=2,
+            max_workers=2,
             cores_per_worker=2,
             hostname_localhost=True,
             backend="mpi",
