From f0f7452e315c712c15f4d21925122a717a2c32a2 Mon Sep 17 00:00:00 2001
From: Sergii Dymchenko
Date: Wed, 24 Apr 2024 02:18:14 +0000
Subject: [PATCH] Do not propogate (#124769)

Fix the propogate typos.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124769
Approved by: https://github.com/Skylion007
---
 test/inductor/test_torchinductor.py                      | 2 +-
 torch/_inductor/codegen/common.py                        | 4 ++--
 torch/ao/pruning/_experimental/pruner/prune_functions.py | 8 ++++----
 torch/csrc/jit/frontend/ir_emitter.cpp                   | 2 +-
 torch/distributed/_spmd/distribute.py                    | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 20cf8be304f3a3..7a99b1f31eb76a 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -8612,7 +8612,7 @@ def fn(x):
         self.common(fn, (torch.ones(1, 1, 13, dtype=dtype),))
 
     @unittest.skipIf(not HAS_CPU or not RUN_CPU, "requires C++ compiler")
-    def test_data_type_propogation(self):
+    def test_data_type_propagation(self):
         from torch._dynamo.utils import detect_fake_mode
         from torch._inductor.codegen.common import boolean_ops
         from torch._inductor.compile_fx import _shape_env_from_inputs
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 674c131d422303..256e16b68d6354 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -229,12 +229,12 @@ def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
         if len(input_nodes) == 0:
             return None
 
-        all_input_nodes_propogated = all(
+        all_input_nodes_propagated = all(
             OptimizationContext.key in n.meta
             and n.meta[OptimizationContext.key].dtype is not None
             for n in input_nodes
         )
-        if not all_input_nodes_propogated:
+        if not all_input_nodes_propagated:
             return None
 
         return functools.reduce(
diff --git a/torch/ao/pruning/_experimental/pruner/prune_functions.py b/torch/ao/pruning/_experimental/pruner/prune_functions.py
index a75c09cc30f8be..2b16d4b327a0b1 100644
--- a/torch/ao/pruning/_experimental/pruner/prune_functions.py
+++ b/torch/ao/pruning/_experimental/pruner/prune_functions.py
@@ -84,7 +84,7 @@ def _prune_module_bias(module: nn.Module, mask: Tensor) -> None:
         delattr(module, "_bias")
 
 
-def _propogate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
+def _propagate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
     r"""
     In the case that we need to propagate biases, this function will return the biases we need
     """
@@ -143,7 +143,7 @@ def prune_linear_activation_linear(
     if getattr(linear1, "prune_bias", False):
         _prune_module_bias(linear1, mask)
     else:
-        pruned_biases = _propogate_module_bias(linear1, mask)
+        pruned_biases = _propagate_module_bias(linear1, mask)
         if pruned_biases is not None:
             if activation:
                 pruned_biases = activation(pruned_biases)
@@ -251,7 +251,7 @@ def prune_conv2d_activation_conv2d(
     if prune_bias:
         _prune_module_bias(conv2d_1, mask)
     else:
-        pruned_biases = _propogate_module_bias(conv2d_1, mask)
+        pruned_biases = _propagate_module_bias(conv2d_1, mask)
         if pruned_biases is not None:
             if activation:
                 pruned_biases = activation(pruned_biases)
@@ -335,7 +335,7 @@ def prune_conv2d_pool_flatten_linear(
     if getattr(conv2d, "prune_bias", False):
         _prune_module_bias(conv2d, mask)
     else:
-        pruned_biases = cast(Tensor, _propogate_module_bias(conv2d, mask))
+        pruned_biases = cast(Tensor, _propagate_module_bias(conv2d, mask))
         flattened_pruned_biases = torch.tensor(
             [[bias] * flatten_scale for bias in pruned_biases], device=mask.device
         ).flatten()
diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp
index 855744ff60ece3..989a6eaf2dfe0f 100644
--- a/torch/csrc/jit/frontend/ir_emitter.cpp
+++ b/torch/csrc/jit/frontend/ir_emitter.cpp
@@ -5685,7 +5685,7 @@ void runCleanupPasses(std::shared_ptr<Graph>& to_clean) {
   // successive runs of immutable constant prop does not change the graph
   ConstantPropagationImmutableTypes(to_clean);
 
-  // Constant Pooling pass must be after ConstantPropogation, which can create
+  // Constant Pooling pass must be after ConstantPropagation, which can create
   // new constants that needs to be pooled.
   ConstantPooling(to_clean);
 
diff --git a/torch/distributed/_spmd/distribute.py b/torch/distributed/_spmd/distribute.py
index 771b064b57b9e6..d0d7aba31489fe 100644
--- a/torch/distributed/_spmd/distribute.py
+++ b/torch/distributed/_spmd/distribute.py
@@ -350,7 +350,7 @@ def default_factory_op_rule(
 }
 
 
-# Dispatch override for factory ops, as DTensor cannot propogate sharding spec
+# Dispatch override for factory ops, as DTensor cannot propagate sharding spec
 # without DTensor inputs.
 FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = {
     aten.scalar_tensor.default: default_factory_op_rule,