
Commit 0ac9c29

Fix defition definition (#60679)
1 parent c173503 commit 0ac9c29

20 files changed, +42 -42 lines changed

python/paddle/device/cuda/cuda_graphed_layer.py

Lines changed: 3 additions & 3 deletions
@@ -66,9 +66,9 @@ def forward(ctx, context, *args):
             ctx.save_for_backward(context, context.args_static, y)
             return y.detach()
         else:
-            for x_staic, x in zip(context.args_static, args):
-                if isinstance(x_staic, paddle.Tensor):
-                    x_staic.copy_(x, True)
+            for x_static, x in zip(context.args_static, args):
+                if isinstance(x_static, paddle.Tensor):
+                    x_static.copy_(x, True)

             context.forward_graph.replay()
             y = context.y_static
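
For context on the code being touched here: CUDAGraphedLayer keeps static placeholder tensors that were captured together with the graph; on replay it copies the fresh inputs into those placeholders and then replays the recorded kernels. Below is a minimal stand-in sketch of that pattern (it performs no real CUDA graph capture and runs on CPU; recomputing fn stands in for context.forward_graph.replay()).

```python
import paddle


def make_replayable(fn, *args_static):
    # "Capture": run once on the static placeholders (stands in for recording
    # a CUDA graph and remembering its static output).
    fn(*args_static)

    def replay(*args):
        # Refresh the captured placeholders in place, as in the hunk above.
        for x_static, x in zip(args_static, args):
            if isinstance(x_static, paddle.Tensor):
                x_static.copy_(x, True)
        # A real CUDAGraphedLayer would call forward_graph.replay() here;
        # recomputing keeps this sketch runnable without a GPU.
        return fn(*args_static).detach()

    return replay


doubler = make_replayable(lambda t: t * 2.0, paddle.zeros([3]))
print(doubler(paddle.to_tensor([1.0, 2.0, 3.0])))  # -> [2., 4., 6.]
```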

python/paddle/distributed/auto_parallel/interface.py

Lines changed: 2 additions & 2 deletions
@@ -237,9 +237,9 @@ def __call__(self, *args, **kwargs):

 def exclude_ops_in_recompute(run_function):
     """
-    Exclude some operators in recompute segements.
+    Exclude some operators in recompute segments.
     Args:
-        run_function (callabe): The callabe function to be excluded.
+        run_function (callable): The callable function to be excluded.

     Returns:
         ExcludeOperator: The callable object.
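
A minimal usage sketch of the wrapper documented above; the import path mirrors the file being edited, and the wrapped helper is hypothetical.

```python
import paddle
from paddle.distributed.auto_parallel.interface import exclude_ops_in_recompute


def build_attention_mask(x):
    # hypothetical helper whose ops should not be recomputed
    return paddle.nn.functional.relu(x)


# Wrapping returns an ExcludeOperator callable; operators created through it
# are marked so the recompute pass leaves them out of recompute segments.
build_attention_mask = exclude_ops_in_recompute(build_attention_mask)
```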

python/paddle/distributed/auto_parallel/random.py

Lines changed: 4 additions & 4 deletions
@@ -29,7 +29,7 @@
 _basic_seed = 42
 _basic_name = ""

-# use Prime number as offset to avoid confict
+# use Prime number as offset to avoid conflict
 _mesh_offset = 173
 _dim_offsets = [11, 23, 37, 73]

@@ -57,7 +57,7 @@ def parallel_manual_seed(seed, name=""):

     This function should be called only once before auto parallel compiles the computation graph (e.g. auto_parallel.engine.prepare() or fit()).

-    This seed only affects how randomness-relative **operators** (dropout, fuse op with dropout inside, etc) are execute amonge mesh, and would NOT affect other processe like Parameter initialization.
+    This seed only affects how randomness-relative **operators** (dropout, fuse op with dropout inside, etc) are execute amonge mesh, and would NOT affect other process like Parameter initialization.

     Examples:
         # seed relative to training step
@@ -85,7 +85,7 @@ def determinate_rng(
     ), "Cannot provide dims mapping and placements at same time."
     # TODO(JZ-LIANG) Support Mesh with any high rank
     # use a string to unique integer hashing algorithm for seed computation.
-    # instead of using offsets to coodinate seed across devices.
+    # instead of using offsets to coordinate seed across devices.
     if len(process_mesh.shape) > 4:
         raise NotImplementedError(
             "Auto Parallel Random Control for Mesh's rank > 4 is NOT supported! Got {}".format(
@@ -131,7 +131,7 @@ def determinate_rng(
     else:
         assert (
             seed_ not in _rng_name_to_seed.values()
-        ), "Seed Confilt! current seed: {}, current sharding expr: {}, generated seed: {}".format(
+        ), "Seed Conflict! current seed: {}, current sharding expr: {}, generated seed: {}".format(
             seed_, sharding_expr, _rng_name_to_seed
         )
         _rng_name_to_seed[sharding_expr] = seed_
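
To make the "prime offsets" comment concrete, here is a toy illustration of deriving distinct per-shard seeds from the base seed and prime offsets; the constants come from the hunk above, but the combining formula is an assumption, not the exact determinate_rng logic.

```python
_basic_seed = 42
_mesh_offset = 173
_dim_offsets = [11, 23, 37, 73]


def toy_seed(mesh_id, dim_coords):
    # Combine the base seed with per-mesh and per-dimension prime offsets so
    # that different shards draw different dropout randomness.
    seed = _basic_seed + mesh_id * _mesh_offset
    for offset, coord in zip(_dim_offsets, dim_coords):
        seed += offset * coord
    return seed


print(toy_seed(0, [0, 1]), toy_seed(0, [1, 0]), toy_seed(1, [0, 1]))  # 65 53 238
```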

python/paddle/distributed/auto_parallel/static/cost/estimate_cost.py

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@


 class CostEstimator:
-    _sepical_op_type = ["fused_attention", "fused_feedforward"]
+    _special_op_type = ["fused_attention", "fused_feedforward"]

     def __init__(
         self, program, cluster, mode="modeling", rank=None, loop_count=10
@@ -207,7 +207,7 @@ def _estimate_core(self, dist_context, resharder, block):

             if dist_op_cost is None:
                 assert (
-                    dist_op.serial_op.type in CostEstimator._sepical_op_type
+                    dist_op.serial_op.type in CostEstimator._special_op_type
                 )
                 continue
             for item in dist_op_cost:

python/paddle/distributed/auto_parallel/static/cost/op_runtime_cost.py

Lines changed: 2 additions & 2 deletions
@@ -88,7 +88,7 @@ def _filter_vars_with_zero_in_degree_and_ignore_feed_fetch_vars():
         for op in cloned_main_block.ops:
             op: Operator
             if is_comm_op(op):
-                # ignore communication op from graph, bacause sometimes we want to profile a sub-graph
+                # ignore communication op from graph, because sometimes we want to profile a sub-graph
                 # and these dangling operators will not work (no graph to communicate to/from)
                 continue
             input_var_names, output_var_names = _collect_op_input_var_names(
@@ -262,7 +262,7 @@ def measure_program_real_op_cost(
         >>> from paddle.distributed.auto_parallel.static.utils import measure_program_real_op_cost
         >>> place: str = paddle.device.get_device() # here we assume place = "cuda:x"
         >>> place = paddle.CUDAPlace(int(place.split(':')[1]))
-        >>> # here "program" is an inner object that has alredy been built before
+        >>> # here "program" is an inner object that has already been built before
         >>> measure_program_real_op_cost(program, verbose_level=1)
     '''

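
The first hunk's comment explains why dangling communication ops are dropped before profiling a sub-graph. A toy filter illustrating the idea (hypothetical op-type strings rather than Paddle Operator objects):

```python
# Hypothetical prefixes; real comm op detection lives in is_comm_op().
COMM_OP_PREFIXES = ("c_allreduce", "c_broadcast", "send_v2", "recv_v2")


def looks_like_comm_op(op_type):
    return op_type.startswith(COMM_OP_PREFIXES)


ops = ["matmul_v2", "c_allreduce_sum", "relu", "send_v2"]
profiled = [t for t in ops if not looks_like_comm_op(t)]
print(profiled)  # ['matmul_v2', 'relu'] -- comm ops have no peer in a sub-graph
```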

python/paddle/distributed/auto_parallel/static/operators/dist_assign.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ def __init__(self, op_type):
         super().__init__(op_type)


-# TODO reomve assign dist op
+# TODO remove assign dist op
 # register_distributed_operator_impl_container(DistributedAssign("assign"))


python/paddle/distributed/auto_parallel/static/operators/dist_concat.py

Lines changed: 4 additions & 4 deletions
@@ -43,16 +43,16 @@ def update_dims_mapping(dist_op):
         input_specs = []
         for name in input_arg_names:
             input_specs.append(get_dist_tensor_spec(dist_op, name))
-        output_sepc = get_dist_tensor_spec(dist_op, output_arg_names[0], False)
+        output_spec = get_dist_tensor_spec(dist_op, output_arg_names[0], False)

         # step2: infer spmd
         rule = get_phi_spmd_rule("concat")
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         fw_results = rule.infer_forward(input_specs, axis)
-        bw_results = rule.infer_backward(input_specs, output_sepc, axis)
+        bw_results = rule.infer_backward(input_specs, output_spec, axis)

         # step3: update dist_attr
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         changed = update_op_dims_mapping(
             dist_op,
             input_arg_names,

python/paddle/distributed/auto_parallel/static/operators/dist_cross_entropy.py

Lines changed: 2 additions & 2 deletions
@@ -64,7 +64,7 @@ def update_dims_mapping(dist_op):

         # step2: infer spmd
         rule = get_phi_spmd_rule("softmax_with_cross_entropy")
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         fw_results = rule.infer_forward(
             logits_spec,
             label_spec,
@@ -87,7 +87,7 @@ def update_dims_mapping(dist_op):
         )

         # step3: update dist_attr
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         changed = update_op_dims_mapping(
             dist_op,
             [logits_name, label_name],

python/paddle/distributed/auto_parallel/static/operators/dist_default.py

Lines changed: 2 additions & 2 deletions
@@ -141,12 +141,12 @@ def update_dims_mapping(dist_op):

         # step2: infer spmd
         rule = get_phi_spmd_rule("default_")
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         fw_results = rule.infer_forward(input_specs, output_specs)
         bw_results = rule.infer_backward(input_specs, output_specs)

         # step3: update dist_attr
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         changed = update_op_dims_mapping(
             dist_op, input_arg_names, output_arg_names, fw_results, bw_results
         )

python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py

Lines changed: 2 additions & 2 deletions
@@ -58,12 +58,12 @@ def update_dims_mapping(dist_op):

         # step2: infer spmd
         rule = get_phi_spmd_rule("dropout")
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         fw_results = rule.infer_forward(x_spec)
         bw_results = rule.infer_backward(x_spec, output_spec)

         # step3: update dist_attr
-        # tensor order following order in PHI defition
+        # tensor order following order in PHI definition
         changed = update_op_dims_mapping(
             dist_op, [x_name], [out_name], fw_results, bw_results
         )
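
The four dist_* files above (concat, cross_entropy, default, dropout) follow the same three steps: build tensor specs, ask a PHI SPMD rule to infer forward/backward sharding, then write the result back with update_op_dims_mapping. As rough intuition for what such a rule computes, here is a toy dims-mapping propagation for concat; the real get_phi_spmd_rule("concat") is implemented in PHI and covers many more cases.

```python
def toy_concat_infer_forward(input_dims_mappings, axis):
    # dims_mapping maps each tensor axis to a mesh axis (-1 = replicated).
    ndim = len(input_dims_mappings[0])
    out = []
    for d in range(ndim):
        if d == axis:
            out.append(-1)  # keep the concatenated axis replicated
        else:
            candidates = {m[d] for m in input_dims_mappings}
            # propagate a shard only if every input agrees on it
            out.append(candidates.pop() if len(candidates) == 1 else -1)
    return out


# two inputs sharded on mesh axis 0 along dim 0, concatenated along dim 1
print(toy_concat_infer_forward([[0, -1], [0, -1]], axis=1))  # -> [0, -1]
```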
