Commit 4b5855f

Fix
1 parent f968050 commit 4b5855f

5 files changed: 12 additions & 12 deletions

python/paddle/distributed/auto_parallel/static/cluster.py

Lines changed: 1 addition & 1 deletion

@@ -741,7 +741,7 @@ def _build_from_dict(self, cluster_info):
                 cluster_info.get("alpha_latency")
             )
         else:
-            self._alpha_latecy = None
+            self._alpha_latency = None
 
     def build_from_file(self, json_file_path):
         with open(json_file_path) as json_file:

python/paddle/distributed/auto_parallel/static/completion.py

Lines changed: 4 additions & 4 deletions

@@ -1643,7 +1643,7 @@ def _is_grad_var_name(name):
         def _get_forward_varname_from_grad_varname(grad_var_name):
             assert _is_grad_var_name(
                 grad_var_name
-            ), f"[{grad_var_name}] is not a grad varnme."
+            ), f"[{grad_var_name}] is not a grad var name."
             return grad_var_name[: grad_var_name.find("@GRAD")]
 
         def _get_op_by_id(ops, id):
@@ -1769,7 +1769,7 @@ def _complete_grad_op_with_forward_op(forward_op, grad_op, vars):
         def infer_backward_op_partial_status(
             vars, grad_op, grad_op_dist_attr
         ):
-            # NOTE Since we use composite op in static mode which might have implicit Reduction of broadcast axes for caculating parameter's gradient.
+            # NOTE Since we use composite op in static mode which might have implicit Reduction of broadcast axes for calculating parameter's gradient.
             # Those implicit Reduction hinder the Partial inference in a normal way, and we need a special method to handle it.
             param_grads = []
             activation_grad = None
@@ -1993,7 +1993,7 @@ def infer_backward_op_partial_status(
                     output_name, ref_fwd_dims_mapping
                 )
             # NOTE(zhaoyingli):
-            # The sum op is used to accmulate the grads' value of the same forward var,
+            # The sum op is used to accumulate the grads' value of the same forward var,
             # sum op's chunk_id is same with the last op which generate the grad.
             ref_chunk_id = None
             ref_process_mesh = None
@@ -2336,7 +2336,7 @@ def _init_global_mesh_for_program(self):
             assert dist_op is not None
             dist_op.dist_attr.process_mesh = ProcessMesh(world_ranks)
 
-            # Find the most compatible implemenetations from the distributed operator
+            # Find the most compatible implementations from the distributed operator
             op_dist_impls = find_compatible_distributed_operator_impls(
                 dist_op, fwd=True
             )
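An aside on the NOTE fixed in the third hunk: when the same forward var feeds several ops, each consumer produces its own gradient contribution, and a sum op accumulates them into the single @GRAD var. The snippet below is not Paddle code, just a minimal NumPy illustration of that accumulation:

    import numpy as np

    # x feeds two consumers in the forward pass: y = 2 * x and z = x ** 2,
    # with loss = sum(y + z).
    x = np.array([1.0, 2.0, 3.0])

    # Backward: each consumer yields its own partial gradient w.r.t. x ...
    grad_from_y = np.full_like(x, 2.0)   # d(sum(2 * x)) / dx
    grad_from_z = 2.0 * x                # d(sum(x ** 2)) / dx

    # ... and a sum op accumulates them into the gradient of the forward var;
    # this accumulation is what the chunk_id bookkeeping above is attached to.
    x_grad = grad_from_y + grad_from_z   # -> [4., 6., 8.]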

python/paddle/distributed/auto_parallel/static/dist_tensor.py

Lines changed: 2 additions & 2 deletions

@@ -112,8 +112,8 @@ def get_local_offsets(
             global_sizes, dims_mapping, topology, processes, rank, shard_sizes
         )
         local_offsets = []
-        rank_relatvie = processes.index(rank)
-        coordinate = _linear_idx2coordinate(topology, rank_relatvie)
+        rank_relative = processes.index(rank)
+        coordinate = _linear_idx2coordinate(topology, rank_relative)
 
         for i in range(len(global_sizes)):
             if dims_mapping[i] == -1:

python/paddle/distributed/auto_parallel/static/helper.py

Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@
 class ProxyLayer(Layer):
     """
     ProxyLayer implements all logic for converting dygraph model into
-    static Program IR. Meanwhile, it provides conviential interfaces for
+    static Program IR. Meanwhile, it provides conventional interfaces for
     auto parallel to visit feed/fetch/loss/metric variables.
     """
 
python/paddle/distributed/auto_parallel/static/utils.py

Lines changed: 4 additions & 4 deletions

@@ -297,8 +297,8 @@ def _get_comm_group(processes, shape, axis, rank):
     assert (
         rank in processes
     ), f"rank [{rank}] is NOT in processes group {processes}"
-    rank_relatvie = processes.index(rank)
-    coordinate = _linear_idx2coordinate(shape, rank_relatvie)
+    rank_relative = processes.index(rank)
+    coordinate = _linear_idx2coordinate(shape, rank_relative)
     coordinates_in_group = [coordinate[:] for i in range(shape[axis])]
 
     # select comm group
@@ -328,8 +328,8 @@ def _get_idx_in_axis(processes, shape, axis, rank):
 
     # NOTE _linear_idx2coordinate assume processes mesh start with 0 and continuous
     # tricks to support processes mesh when it is not start with 0 or continuous
-    rank_relatvie = processes.index(rank)
-    coordinate = _linear_idx2coordinate(shape, rank_relatvie)
+    rank_relative = processes.index(rank)
+    coordinate = _linear_idx2coordinate(shape, rank_relative)
     return coordinate[axis]
 