
Commit f0f7452
Do not propogate (pytorch#124769)
Fix the "propogate" typos.

Pull Request resolved: pytorch#124769
Approved by: https://github.com/Skylion007
kit1980 authored and pytorchmergebot committed Apr 24, 2024
1 parent 952a00e commit f0f7452
Showing 5 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion test/inductor/test_torchinductor.py
@@ -8612,7 +8612,7 @@ def fn(x):
         self.common(fn, (torch.ones(1, 1, 13, dtype=dtype),))
 
     @unittest.skipIf(not HAS_CPU or not RUN_CPU, "requires C++ compiler")
-    def test_data_type_propogation(self):
+    def test_data_type_propagation(self):
         from torch._dynamo.utils import detect_fake_mode
         from torch._inductor.codegen.common import boolean_ops
         from torch._inductor.compile_fx import _shape_env_from_inputs
4 changes: 2 additions & 2 deletions torch/_inductor/codegen/common.py
@@ -229,12 +229,12 @@ def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
         if len(input_nodes) == 0:
             return None
 
-        all_input_nodes_propogated = all(
+        all_input_nodes_propagated = all(
             OptimizationContext.key in n.meta
             and n.meta[OptimizationContext.key].dtype is not None
             for n in input_nodes
         )
-        if not all_input_nodes_propogated:
+        if not all_input_nodes_propagated:
            return None
 
         return functools.reduce(
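For context, the guard renamed in this hunk gates Inductor's dtype deduction: a node's dtype is computed only once every input node carries a propagated dtype, after which the input dtypes are reduced through the type-promotion rules. A minimal standalone sketch of that pattern follows; FakeNode and its meta layout are illustrative stand-ins, not Inductor's real structures:

import functools

import torch

# Illustrative stand-in for a torch.fx.Node whose .meta may carry a dtype.
class FakeNode:
    def __init__(self, dtype=None):
        self.meta = {"dtype": dtype}

def deduce_dtype(input_nodes):
    # Bail out unless every input already has a propagated dtype,
    # mirroring the all_input_nodes_propagated guard above.
    if not all(n.meta.get("dtype") is not None for n in input_nodes):
        return None
    # Combine the input dtypes via the standard promotion lattice.
    return functools.reduce(
        torch.promote_types, [n.meta["dtype"] for n in input_nodes]
    )

print(deduce_dtype([FakeNode(torch.float16), FakeNode(torch.float32)]))  # torch.float32
print(deduce_dtype([FakeNode(torch.float16), FakeNode()]))               # None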
8 changes: 4 additions & 4 deletions torch/ao/pruning/_experimental/pruner/prune_functions.py
@@ -84,7 +84,7 @@ def _prune_module_bias(module: nn.Module, mask: Tensor) -> None:
         delattr(module, "_bias")
 
 
-def _propogate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
+def _propagate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
     r"""
     In the case that we need to propagate biases, this function will return the biases we need
     """

@@ -143,7 +143,7 @@ def prune_linear_activation_linear(
     if getattr(linear1, "prune_bias", False):
         _prune_module_bias(linear1, mask)
     else:
-        pruned_biases = _propogate_module_bias(linear1, mask)
+        pruned_biases = _propagate_module_bias(linear1, mask)
         if pruned_biases is not None:
             if activation:
                 pruned_biases = activation(pruned_biases)

@@ -251,7 +251,7 @@ def prune_conv2d_activation_conv2d(
     if prune_bias:
         _prune_module_bias(conv2d_1, mask)
     else:
-        pruned_biases = _propogate_module_bias(conv2d_1, mask)
+        pruned_biases = _propagate_module_bias(conv2d_1, mask)
         if pruned_biases is not None:
             if activation:
                 pruned_biases = activation(pruned_biases)

@@ -335,7 +335,7 @@ def prune_conv2d_pool_flatten_linear(
     if getattr(conv2d, "prune_bias", False):
         _prune_module_bias(conv2d, mask)
     else:
-        pruned_biases = cast(Tensor, _propogate_module_bias(conv2d, mask))
+        pruned_biases = cast(Tensor, _propagate_module_bias(conv2d, mask))
         flattened_pruned_biases = torch.tensor(
             [[bias] * flatten_scale for bias in pruned_biases], device=mask.device
         ).flatten()
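The renamed _propagate_module_bias helper embodies the pruner's bias-propagation idea: when an output channel is pruned, its bias is not discarded but is pushed through the activation and absorbed into the next layer's bias. A rough sketch of the underlying arithmetic for two linear layers; the helper name is hypothetical, it assumes both layers have biases, and the real pruner also shrinks the weight tensors:

import torch
import torch.nn as nn

def fold_pruned_biases(linear1: nn.Linear, linear2: nn.Linear,
                       mask: torch.Tensor, activation=torch.relu) -> None:
    # Biases of pruned channels (mask == False) would otherwise be lost.
    pruned_biases = linear1.bias.detach()[~mask]
    if activation is not None:
        pruned_biases = activation(pruned_biases)
    # Each pruned channel still feeds linear2 through one weight column,
    # so its (activated) bias can be folded into linear2's bias.
    with torch.no_grad():
        linear2.bias += linear2.weight[:, ~mask] @ pruned_biases

l1, l2 = nn.Linear(4, 3), nn.Linear(3, 2)
mask = torch.tensor([True, False, True])  # prune channel 1 of l1
fold_pruned_biases(l1, l2, mask)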
2 changes: 1 addition & 1 deletion torch/csrc/jit/frontend/ir_emitter.cpp
@@ -5685,7 +5685,7 @@ void runCleanupPasses(std::shared_ptr<Graph>& to_clean) {
   // successive runs of immutable constant prop does not change the graph
   ConstantPropagationImmutableTypes(to_clean);
 
-  // Constant Pooling pass must be after ConstantPropogation, which can create
+  // Constant Pooling pass must be after ConstantPropagation, which can create
   // new constants that needs to be pooled.
   ConstantPooling(to_clean);
 
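The comment fixed here records a real ordering constraint: constant propagation can fold the same expression in several places into several identical constants, and constant pooling then merges them into one. A toy model of the two passes over a made-up instruction list, illustrative only and not the JIT's actual IR:

# Each instruction is (dest, op, operands), with literal ints as operands.
instrs = [("a", "add", (2, 3)), ("b", "add", (2, 3)), ("c", "mul", ("a", "b"))]

def constant_propagation(instrs):
    out = []
    for dest, op, args in instrs:
        if op == "add" and all(isinstance(a, int) for a in args):
            out.append((dest, "const", sum(args)))  # a new constant appears
        else:
            out.append((dest, op, args))
    return out

def constant_pooling(instrs):
    pooled, canonical, renames = [], {}, {}
    for dest, op, args in instrs:
        if op == "const":
            if args in canonical:                # duplicate constant:
                renames[dest] = canonical[args]  # reuse the pooled one
                continue
            canonical[args] = dest
            pooled.append((dest, op, args))
        else:
            pooled.append((dest, op, tuple(renames.get(a, a) for a in args)))
    return pooled

print(constant_pooling(constant_propagation(instrs)))
# [('a', 'const', 5), ('c', 'mul', ('a', 'a'))]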
2 changes: 1 addition & 1 deletion torch/distributed/_spmd/distribute.py
@@ -350,7 +350,7 @@ def default_factory_op_rule(
     }
 
 
-# Dispatch override for factory ops, as DTensor cannot propogate sharding spec
+# Dispatch override for factory ops, as DTensor cannot propagate sharding spec
 # without DTensor inputs.
 FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = {
     aten.scalar_tensor.default: default_factory_op_rule,
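The last comment fix lands in _spmd's dispatch table: factory ops such as aten.scalar_tensor take no tensor inputs, so there is no input DTensor from which a sharding spec could be propagated, and a per-op rule must supply the output placement instead. A generic sketch of that dispatch pattern; the rule body and its signature are simplified placeholders, not the _spmd implementation:

from typing import Callable, Dict

import torch

aten = torch.ops.aten

def default_factory_op_rule(op, args, kwargs):
    # Placeholder rule: with no input sharding spec to propagate from,
    # decide a placement outright (here: replicate) for the new tensor.
    return {"placement": "replicate", "value": op(*args, **kwargs)}

FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = {
    aten.scalar_tensor.default: default_factory_op_rule,
}

def dispatch(op, args=(), kwargs=None):
    kwargs = kwargs or {}
    if op in FACTORY_OPS:              # factory op: use the override table
        return FACTORY_OPS[op](op, args, kwargs)
    return op(*args, **kwargs)         # otherwise, ordinary dispatch

print(dispatch(aten.scalar_tensor.default, (3.14,)))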
