
Commit 17e1ad3

craymichael authored and facebook-github-bot committed
Fix OSS flake8 and mypy failures (meta-pytorch#1538)
Summary:
Pull Request resolved: meta-pytorch#1538

Fix OSS test failures due to updated flake8 and mypy versions.

Reviewed By: sarahtranfb

Differential Revision: D72193786

fbshipit-source-id: 5567147675307094a18eb95d1e41be0367840cdb
1 parent 2db93a7 commit 17e1ad3

File tree: 6 files changed (+2, -11 lines)

captum/_utils/common.py

Lines changed: 0 additions & 4 deletions
@@ -863,10 +863,8 @@ def forward_hook(
         inp: Union[Tensor, Tuple[Tensor, ...]],
         out: Union[Tensor, Tuple[Tensor, ...]],
     ) -> None:
-        nonlocal grad_out
 
         def output_tensor_hook(output_grad: Tensor) -> None:
-            nonlocal grad_out
             grad_out[output_grad.device] = output_grad
 
         if isinstance(out, tuple):

@@ -881,8 +879,6 @@ def pre_hook(module: Module, inp: Union[Tensor, Tuple[Tensor, ...]]) -> Tensor:
         def input_tensor_hook(
             input_grad: Tensor,
         ) -> Union[None, Tensor, Tuple[Tensor, ...]]:
-            nonlocal grad_out
-
             if len(grad_out) == 0:
                 return None
             hook_out = hook(module, input_grad, grad_out[input_grad.device])
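
Each deleted line above is an unused `nonlocal` declaration. `nonlocal` (like
`global`) is only required when a nested function rebinds the enclosing name;
reading the name, or mutating the object it refers to, already works through
the closure. Recent flake8/pyflakes releases flag such dead declarations (the
F824 check), which is what broke the OSS lint run. A minimal sketch, with
illustrative names that are not from the Captum code:

def make_recorder():
    grad_out = {}  # closed-over dict, analogous to the hook's grad_out

    def record(device: str, grad: float) -> None:
        # Mutating the dict does not rebind the name `grad_out`,
        # so no `nonlocal` declaration is needed here.
        grad_out[device] = grad

    def reset() -> None:
        # Rebinding the name itself is what requires `nonlocal`.
        nonlocal grad_out
        grad_out = {}

    return grad_out, record, reset

out, record, reset = make_recorder()
record("cpu", 1.0)
assert out == {"cpu": 1.0}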

captum/_utils/gradient.py

Lines changed: 0 additions & 1 deletion
@@ -306,7 +306,6 @@ def forward_hook(module, inp, out=None):
         if require_layer_grads:
             apply_gradient_requirements(eval_tsrs, warn=False)
         with lock:
-            nonlocal saved_layer
             # Note that cloning behaviour of `eval_tsr` is different
             # when `forward_hook_with_return` is set to True. This is because
             # otherwise `backward()` on the last output layer won't execute.

captum/influence/_core/influence_function.py

Lines changed: 0 additions & 1 deletion
@@ -1054,7 +1054,6 @@ def compute_intermediate_quantities(
         # define a helper function that returns the embeddings for a batch
         # pyre-fixme[53]: Captured variable `loss_fn` is not annotated.
         def get_batch_embeddings(batch: Tuple[Tensor, ...]) -> Tensor:
-            nonlocal loss_fn, reduction_type, return_device
             # if `self.R` is on cpu, and `self.model_device` was not cpu, this implies
             # `self.R` was too large to fit in gpu memory, and we should do the matrix
             # multiplication of the batch jacobians with `self.R` separately for each

captum/influence/_core/tracincp.py

Lines changed: 0 additions & 2 deletions
@@ -878,7 +878,6 @@ def compute_intermediate_quantities(
         f_inputs: DataLoader = _format_inputs_dataset(inputs)
 
         def get_checkpoint_contribution(checkpoint: str) -> Tensor:
-            nonlocal f_inputs
             assert (
                 checkpoint is not None
             ), "None returned from `checkpoints`, cannot load."

@@ -1246,7 +1245,6 @@ def calculate_via_vector_norm(layer_jacobian: Tensor) -> Tensor:
             )
 
         def get_checkpoint_contribution(checkpoint: str) -> Tensor:
-            nonlocal inputs_len
             # This function returns a 1D tensor representing the contribution to the
             # self influence score for the given checkpoint, for all batches in
             # `inputs`. The length of the 1D tensor is the total number of

captum/testing/helpers/basic_models.py

Lines changed: 2 additions & 2 deletions
@@ -436,7 +436,7 @@ class BasicModel_GradientLayerAttribution(nn.Module):
     def __init__(
         self,
         inplace: bool = False,
-        unsupported_layer_output: PassThroughOutputType = None,
+        unsupported_layer_output: Optional[PassThroughOutputType] = None,
     ) -> None:
         super().__init__()
         # Linear 0 is simply identity transform

@@ -464,7 +464,7 @@ def __init__(
         self.linear3.weight = nn.Parameter(torch.ones(2, 4))
         self.linear3.bias = nn.Parameter(torch.tensor([-1.0, 1.0]))
 
-        self.int_layer = PassThroughLayerOutput()  # sample layer with an int ouput
+        self.int_layer = PassThroughLayerOutput()  # sample layer with an int output
 
     @no_type_check
     def forward(

tests/attr/test_input_layer_wrapper.py

Lines changed: 0 additions & 1 deletion
@@ -45,7 +45,6 @@
 
 class InputLayerMeta(type):
     def __new__(metacls, name: str, bases: Tuple, attrs: Dict):
-        global layer_methods_to_test_with_equiv
         for (
             layer_method,
             equiv_method,
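
The same rule covers `global` as covers `nonlocal`: the metaclass only reads
the module-level list, and reading a module-level name never needs a `global`
declaration, so flake8 reports the statement as dead code. A minimal sketch,
with an illustrative stand-in for `layer_methods_to_test_with_equiv`:

METHODS = ["a", "b"]  # stands in for layer_methods_to_test_with_equiv

def count_methods() -> int:
    # Lookup falls through to module scope automatically; `global METHODS`
    # would only be needed to rebind the name.
    return len(METHODS)

assert count_methods() == 2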
