1 change: 1 addition & 0 deletions pyproject.toml
@@ -85,6 +85,7 @@ select = [
 
     # Flake8-simplify
     "SIM101",
+    "SIM117",
 
     # Pygrep-hooks
     "PGH004",
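A note on the rule enabled here: SIM117 is the flake8-simplify check ("multiple with statements") that flags a `with` statement whose body is nothing but another `with`, and asks for a single combined statement instead. A minimal sketch of the pattern it targets, using made-up file names rather than anything from this PR:

    # Flagged by SIM117: a with nested directly inside another with.
    with open("a.txt") as src:
        with open("b.txt", "w") as dst:
            dst.write(src.read())

    # Preferred: one with statement listing both context managers.
    with open("a.txt") as src, open("b.txt", "w") as dst:
        dst.write(src.read())

When the combined header gets long, it can be wrapped in parentheses as done throughout the changes below; that parenthesized form is part of the Python 3.10 grammar (and also happens to parse under 3.9's PEG-based parser).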
12 changes: 7 additions & 5 deletions python/paddle/base/dygraph/base.py
@@ -650,11 +650,13 @@ def guard(place: PlaceLike | None = None) -> Generator[None, None, None]:
     else:
         expected_place = framework._current_expected_place_()
 
-    with framework.program_guard(train, startup):
-        with framework.unique_name.guard():
-            with framework._dygraph_guard(tracer):
-                with framework._dygraph_place_guard(expected_place):
-                    yield
+    with (
+        framework.program_guard(train, startup),
+        framework.unique_name.guard(),
+        framework._dygraph_guard(tracer),
+        framework._dygraph_place_guard(expected_place),
+    ):
+        yield
 
 
 @framework.non_static_only
28 changes: 15 additions & 13 deletions python/paddle/cinn/compiler/compute_code_generator.py
@@ -221,20 +221,22 @@ def visit_If(self, node):
         self.visit_compound_statement(node.body)
 
     def visit_With(self, node):
-        with self.variables_table:
-            with contextlib.ExitStack() as context_stack:
-                for item in node.items:
-                    cur_ctx = ExprExecutor(self.variables_table.get()).exec(
-                        item.context_expr
-                    )
-                    cur_ctx = context_stack.enter_context(cur_ctx)
-                    if item.optional_vars is not None:
-                        local_var_table = exec_assign(
-                            target=item.optional_vars, source=cur_ctx
-                        )
-                        for k, v in local_var_table.items():
-                            self.variables_table.add(k, v)
-                body = self.visit_compound_statement(node.body)
+        with (
+            self.variables_table,
+            contextlib.ExitStack() as context_stack,
+        ):
+            for item in node.items:
+                cur_ctx = ExprExecutor(self.variables_table.get()).exec(
+                    item.context_expr
+                )
+                cur_ctx = context_stack.enter_context(cur_ctx)
+                if item.optional_vars is not None:
+                    local_var_table = exec_assign(
+                        target=item.optional_vars, source=cur_ctx
+                    )
+                    for k, v in local_var_table.items():
+                        self.variables_table.add(k, v)
+            body = self.visit_compound_statement(node.body)
 
     def visit_Expr(self, node):
         if is_node_parsed_in_schedule(node.value):
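Note that the visit_With rewrite mixes a plain context-manager object (`self.variables_table`) with `contextlib.ExitStack() as context_stack` in one parenthesized `with`; the items are still entered left to right and exited in reverse order, exactly as the nested form was. A self-contained sketch of that shape, using a toy stand-in rather than the CINN classes:

    import contextlib

    class Scope:
        # Toy stand-in for a context-manager object such as variables_table.
        def __enter__(self):
            print("enter scope")
            return self

        def __exit__(self, *exc):
            print("exit scope")
            return False  # do not suppress exceptions

    with (
        Scope(),                          # entered first, exited last
        contextlib.ExitStack() as stack,  # entered second, exited first
    ):
        # Further managers can still be pushed onto the stack dynamically,
        # as the visitor does for each item of the parsed with statement.
        stack.enter_context(open(__file__))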
28 changes: 15 additions & 13 deletions python/paddle/dataset/movielens.py
@@ -148,19 +148,21 @@ def __initialize_meta_info__():
 def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
     fn = __initialize_meta_info__()
     np.random.seed(rand_seed)
-    with zipfile.ZipFile(file=fn) as package:
-        with package.open('ml-1m/ratings.dat') as rating:
-            for line in rating:
-                line = line.decode(encoding='latin')
-                if (np.random.random() < test_ratio) == is_test:
-                    uid, mov_id, rating, _ = line.strip().split("::")
-                    uid = int(uid)
-                    mov_id = int(mov_id)
-                    rating = float(rating) * 2 - 5.0
-
-                    mov = MOVIE_INFO[mov_id]
-                    usr = USER_INFO[uid]
-                    yield usr.value() + mov.value() + [[rating]]
+    with (
+        zipfile.ZipFile(file=fn) as package,
+        package.open('ml-1m/ratings.dat') as rating,
+    ):
+        for line in rating:
+            line = line.decode(encoding='latin')
+            if (np.random.random() < test_ratio) == is_test:
+                uid, mov_id, rating, _ = line.strip().split("::")
+                uid = int(uid)
+                mov_id = int(mov_id)
+                rating = float(rating) * 2 - 5.0
+
+                mov = MOVIE_INFO[mov_id]
+                usr = USER_INFO[uid]
+                yield usr.value() + mov.value() + [[rating]]
 
 
 @deprecated(
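In this reader the second context manager depends on the first: `package.open(...)` uses the `package` name bound by `zipfile.ZipFile(file=fn) as package`. That is safe because the items of a combined `with` are evaluated and entered left to right (and exited in reverse), so the member file is opened after the archive and closed before it. A small self-contained sketch of the same dependency, with an in-memory archive and made-up member names:

    import io
    import zipfile

    # Build a tiny archive in memory so the example needs no files on disk.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("data/ratings.dat", "1::10::5\n")

    with (
        zipfile.ZipFile(buf) as package,             # entered first, binds package
        package.open("data/ratings.dat") as rating,  # may refer to package
    ):
        for line in rating:
            print(line.decode("latin-1").strip())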
28 changes: 16 additions & 12 deletions python/paddle/distributed/auto_parallel/static/helper.py
@@ -564,21 +564,25 @@ def init(self, main_program, place, dist_context):
                 param.get_tensor()._clear()
                 with paddle.base.dygraph.guard():
                     if amp_dtype == "float16":
-                        with paddle.no_grad():
-                            with paddle.base.framework._dygraph_place_guard(
+                        with (
+                            paddle.no_grad(),
+                            paddle.base.framework._dygraph_place_guard(
                                 place=place
-                            ):
-                                t_casted = param_used.cast(
-                                    dtype=core.VarDesc.VarType.FP16
-                                )
+                            ),
+                        ):
+                            t_casted = param_used.cast(
+                                dtype=core.VarDesc.VarType.FP16
+                            )
                     elif amp_dtype == "bfloat16":
-                        with paddle.no_grad():
-                            with paddle.base.framework._dygraph_place_guard(
+                        with (
+                            paddle.no_grad(),
+                            paddle.base.framework._dygraph_place_guard(
                                 place=place
-                            ):
-                                t_casted = param_used.cast(
-                                    dtype=core.VarDesc.VarType.BF16
-                                )
+                            ),
+                        ):
+                            t_casted = param_used.cast(
+                                dtype=core.VarDesc.VarType.BF16
+                            )
                 # NOTE(lizhiyu): Clear the origin param. Don't use `param_used.get_tensor().get_tensor()._clear()` to
                 # clear the `DistTensor`, because it can't clear the `_holder`,
                 # which `param_used.get_tensor().get_tensor()` will copy one `DenseTensor`.
@@ -244,9 +244,11 @@ def _generate_optimizer(
         self._dist_context._serial_optimizer = optimizer
         self._dist_context._serial_optimizer._learning_rate = learning_rate
 
-        with program_guard(main_program, startup_program):
-            with main_program.switch_name_generator_guard("opt_"):
-                optimizer_ops = new_optimizer.apply_gradients(params_grads)
+        with (
+            program_guard(main_program, startup_program),
+            main_program.switch_name_generator_guard("opt_"),
+        ):
+            optimizer_ops = new_optimizer.apply_gradients(params_grads)
         self._completer.complete_update_annotation(main_program)
         return optimizer_ops
 
@@ -342,11 +342,13 @@ def _apply_optimization(self, trial):
 
         # Generate optimizer
         # FIXME should be remove from apply pass after pass support optimizers
-        with program_guard(dist_main_prog, dist_startup_prog):
-            with dist_main_prog.switch_name_generator_guard("opt_"):
-                optimizer_ops = dist_context.serial_optimizer.apply_gradients(
-                    dist_params_grads
-                )
+        with (
+            program_guard(dist_main_prog, dist_startup_prog),
+            dist_main_prog.switch_name_generator_guard("opt_"),
+        ):
+            optimizer_ops = dist_context.serial_optimizer.apply_gradients(
+                dist_params_grads
+            )
         completer.complete_update_annotation(dist_main_prog)
 
         resharder = Resharder(
8 changes: 5 additions & 3 deletions python/paddle/distributed/passes/auto_parallel_master_grad.py
@@ -273,9 +273,11 @@ def _regenerate_optimizer(
             dist_context._serial_optimizer._learning_rate
         )
         serial_optimizer._sorted = False
-        with program_guard(main_program, startup_program):
-            with main_program.switch_name_generator_guard("opt_"):
-                _ = serial_optimizer.apply_gradients(params_grads)
+        with (
+            program_guard(main_program, startup_program),
+            main_program.switch_name_generator_guard("opt_"),
+        ):
+            _ = serial_optimizer.apply_gradients(params_grads)
         self._completer.complete_update_annotation(main_program)
 
     def _add_master_grad(self, main_program, params_grads, dist_context):
@@ -113,20 +113,20 @@ def sync_shared_parameters(self, main_program, startup_program):
                 )
 
                 # Add shared parameter builtin.parameter with "shared_" prefix.
-                with auto_complete_op_role(main_program, OpRole.Forward):
-                    with paddle.static.program_guard(
+                with (
+                    auto_complete_op_role(main_program, OpRole.Forward),
+                    paddle.static.program_guard(
                         main_program, startup_program
-                    ):
-                        shared_param = paddle.pir.core.create_parameter(
-                            dtype=param.dtype,
-                            shape=param.shape,
-                            name="shared_" + param_name,
-                            process_mesh=dst_mesh,
-                            placements=src_dist_attr.placements,
-                            initializer=paddle.nn.initializer.Constant(
-                                value=0
-                            ),
-                        )
+                    ),
+                ):
+                    shared_param = paddle.pir.core.create_parameter(
+                        dtype=param.dtype,
+                        shape=param.shape,
+                        name="shared_" + param_name,
+                        process_mesh=dst_mesh,
+                        placements=src_dist_attr.placements,
+                        initializer=paddle.nn.initializer.Constant(value=0),
+                    )
                 main_program.set_parameters_from(startup_program)
 
                 # Record new shared parameter.
12 changes: 7 additions & 5 deletions python/paddle/framework/io_utils.py
@@ -171,11 +171,13 @@ def _load_program_scope(main=None, startup=None, scope=None):
     prog = main if main else paddle.base.Program()
     startup_prog = startup if startup else paddle.base.Program()
     scope = scope if scope else paddle.base.core.Scope()
-    with paddle.base.scope_guard(scope):
-        with paddle.base.program_guard(prog, startup_prog):
-            with paddle.base.unique_name.guard():
-                with paddle.base.framework._dygraph_guard(None):
-                    yield
+    with (
+        paddle.base.scope_guard(scope),
+        paddle.base.program_guard(prog, startup_prog),
+        paddle.base.unique_name.guard(),
+        paddle.base.framework._dygraph_guard(None),
+    ):
+        yield
 
 
 @static_only
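Both `guard()` in base.py above and `_load_program_scope()` here follow the same shape: a generator-based context manager that enters several guards and then yields. A minimal sketch of that composition pattern, with stand-in guards instead of the real Paddle ones:

    import contextlib

    @contextlib.contextmanager
    def demo_guard(name):
        # Stand-in for scope_guard / program_guard / unique_name.guard, etc.
        print(f"enter {name}")
        try:
            yield
        finally:
            print(f"exit {name}")

    @contextlib.contextmanager
    def load_scope():
        # One combined with replaces four nested with blocks; the guards are
        # entered left to right and released in reverse order on exit.
        with (
            demo_guard("scope"),
            demo_guard("program"),
            demo_guard("unique_name"),
            demo_guard("dygraph"),
        ):
            yield

    with load_scope():
        print("body runs with all guards active")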