
Commit 6d43c89

[BE]: Update Ruff to 0.0.280 (pytorch#105724)
Removes unused loop values in Python dictionary iteration. Automated fix from Ruff master.

Pull Request resolved: pytorch#105724
Approved by: https://github.com/ezyang, https://github.com/janeyx99
Skylion007 authored and pytorchmergebot committed Jul 22, 2023
1 parent 53a4b26 commit 6d43c89
Showing 88 changed files with 221 additions and 238 deletions.
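
Nearly every hunk below applies the same autofix: when a dict-iteration loop binds a key or value it never uses, iterate over .values() (or .keys()) directly instead of .items(). This matches the PERF rule family enabled in pyproject.toml below; PERF102 (incorrect-dict-iterator) is my inference for the specific rule, since the commit message names only the behavior. A minimal before/after sketch:

params_map = {"conv": 1, "linear": 2}

# Before: `name` is bound on every iteration but never used
for name, cfg in params_map.items():
    print(cfg)

# After: skip building the (key, value) tuples entirely
for cfg in params_map.values():
    print(cfg)

Besides silencing the lint, the rewrite avoids unpacking a 2-tuple on every iteration.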
6 changes: 3 additions & 3 deletions .ci/pytorch/perf_test/compare_with_baseline.py
@@ -59,12 +59,12 @@
     print("z-value: ", z_value)

     if z_value >= 3:
-        raise Exception('''\n
+        raise Exception(f'''\n
 z-value >= 3, there is high chance of perf regression.\n
 To reproduce this regression, run
-`cd .ci/pytorch/perf_test/ && bash {}.sh` on your local machine
+`cd .ci/pytorch/perf_test/ && bash {test_name}.sh` on your local machine
 and compare the runtime before/after your code change.
-'''.format(test_name))
+''')
     else:
         print("z-value < 3, no perf regression detected.")
     if args.update:
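
The hunk above also shows a second rewrite that recurs in this diff: .format() calls on triple-quoted strings become f-strings (pyupgrade's UP032 under the usual numbering; presumably the newer Ruff can now autofix the multiline case). A standalone sketch, with a hypothetical test_name value for illustration:

test_name = "perf_test_cpu"  # hypothetical value, not from the diff

# Before: positional .format() on a multiline string
msg = '''run `bash {}.sh`
and compare runtimes'''.format(test_name)

# After: the same string as an f-string, placeholder named inline
msg2 = f'''run `bash {test_name}.sh`
and compare runtimes'''

assert msg == msg2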
2 changes: 1 addition & 1 deletion .github/scripts/trymerge.py
@@ -620,7 +620,7 @@ def get_ghstack_prs(repo: GitRepo, pr: "GitHubPR") -> List[Tuple["GitHubPR", str
     Get the open PRs in the stack that are below this PR. Throws error if any of the PRs are out of sync.
     """
     assert pr.is_ghstack_pr()
-    entire_stack: List[Tuple["GitHubPR", str]] = []
+    entire_stack: List[Tuple[GitHubPR, str]] = []
     # For ghstack, cherry-pick commits based from origin
     orig_ref = f"{repo.remote}/{re.sub(r'/head$', '/orig', pr.head_ref())}"
     rev_list = repo.revlist(f"{pr.default_branch()}..{orig_ref}")
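
Dropping the quotes around GitHubPR in the local annotation above is safe because PEP 526 annotations on local variables are never evaluated at runtime, so the forward reference needs no string quoting. (Which Ruff rule fired here is my inference — possibly UP037, quoted-annotation.) A self-contained illustration:

from typing import List, Tuple

def build_stack() -> None:
    # Local variable annotations are never evaluated (PEP 526), so this
    # runs even though no GitHubPR class exists in this toy module.
    stack: List[Tuple[GitHubPR, str]] = []
    print(len(stack))

build_stack()  # prints 0 -- no NameError; the annotation is never executed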
2 changes: 1 addition & 1 deletion .lintrunner.toml
@@ -3086,6 +3086,6 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.0.277',
+    'ruff==0.0.280',
 ]
 is_formatter = true
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -31,7 +31,7 @@ ignore = [
     "B019", "B020",
     "B023", "B024", "B026",
     "B028", # No explicit `stacklevel` keyword argument found
-    "B904", "B905",
+    "B904",
     "E402",
     "C408", # C408 ignored because we like the dict keyword argument syntax
     "E501", # E501 is not flexible enough, we're using B950 instead
@@ -70,6 +70,7 @@ select = [
     # Not included in flake8
     "UP",
     "PERF",
+    "PGH004",
     "PLE",
     "TRY302",
 ]
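
Two lint-config changes ride along with the version bump: B905 (zip() without an explicit strict= argument, under the usual flake8-bugbear numbering) is no longer ignored, and PGH004 (blanket # noqa, from the pygrep-hooks set) is newly selected. PGH004 is what forces the bare # NOQA comments later in this diff (see test/run_test.py) to name the exact rule they suppress:

# Flagged by PGH004: a bare noqa silences every rule on the line
import json  # noqa

# Preferred: suppress only F401 ("imported but unused")
import json  # noqa: F401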
2 changes: 1 addition & 1 deletion test/cpp_api_parity/functional_impl_check.py
@@ -225,7 +225,7 @@ def build_cpp_tests(unit_test_class, print_cpp_source=False):
     assert len(unit_test_class.functional_test_params_map) > 0
     cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_FUNCTIONAL_CPP_SOURCE
     functions = []
-    for test_name, test_params in unit_test_class.functional_test_params_map.items():
+    for test_params in unit_test_class.functional_test_params_map.values():
         cpp_sources += generate_test_cpp_sources(test_params=test_params, template=TORCH_NN_FUNCTIONAL_TEST_FORWARD)
         functions.append(f'{test_params.functional_variant_name}_test_forward')
     if print_cpp_source:
2 changes: 1 addition & 1 deletion test/cpp_api_parity/module_impl_check.py
@@ -292,7 +292,7 @@ def build_cpp_tests(unit_test_class, print_cpp_source=False):
     assert len(unit_test_class.module_test_params_map) > 0
     cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_MODULE_CPP_SOURCE
     functions = []
-    for test_name, test_params in unit_test_class.module_test_params_map.items():
+    for test_params in unit_test_class.module_test_params_map.values():
         cpp_sources += generate_test_cpp_sources(
             test_params=test_params, template=TORCH_NN_MODULE_TEST_FORWARD_BACKWARD)
         functions.append(f'{test_params.module_variant_name}_test_forward_backward')
4 changes: 2 additions & 2 deletions test/distributed/fsdp/test_fsdp_optim_state.py
@@ -1870,14 +1870,14 @@ def step():

         step()
         original_osd = deepcopy(optim.state_dict())
-        for param_id, state in original_osd["state"].items():
+        for state in original_osd["state"].values():
             # Add customized value
             state["value1"] = 2.74
             state["value2"] = None

         osd = FSDP.optim_state_dict(model, optim, optim_state_dict=original_osd)
         osd_to_load = FSDP.optim_state_dict_to_load(model, optim, osd)
-        for param_id, state in osd_to_load["state"].items():
+        for state in osd_to_load["state"].values():
             self.assertEqual(state["value1"], 2.74)
             self.assertEqual(state["value2"], None)

4 changes: 2 additions & 2 deletions test/distributions/test_distributions.py
@@ -1898,8 +1898,8 @@ def rvs(self, n_sample):
         self._check_sampler_sampler(
             MixtureSameFamily(Categorical(probs=probs), Normal(loc, scale)),
             ScipyMixtureNormal(probs.numpy(), loc.numpy(), scale.numpy()),
-            '''MixtureSameFamily(Categorical(probs={}),
-            Normal(loc={}, scale={}))'''.format(probs, loc, scale))
+            f'''MixtureSameFamily(Categorical(probs={probs}),
+            Normal(loc={loc}, scale={scale}))''')

     def test_normal(self):
         loc = torch.randn(5, 5, requires_grad=True)
2 changes: 1 addition & 1 deletion test/dynamo/test_modules.py
@@ -504,7 +504,7 @@ def __init__(

     def forward(self, init_features):
         features = [init_features]
-        for name, layer in self.items():
+        for layer in self.values():
             new_features = layer(features)
             features.append(new_features)
         return torch.cat(features, 1)
4 changes: 2 additions & 2 deletions test/functorch/discover_coverage.py
@@ -321,7 +321,7 @@ def get_all_tested_ops():
     overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
     op_to_opinfo = get_ops_covered_by_opinfos()
     result = set({})
-    for name, op in get_covered_ops(overridable_outplace_we_care_about).items():
+    for op in get_covered_ops(overridable_outplace_we_care_about).values():
         opinfos = op_to_opinfo[op]
         for opinfo in opinfos:
             result.add(opinfo.name)
@@ -332,7 +332,7 @@ def get_skipped_or_xfailed_ops_for(test_name):
     overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
     op_to_opinfo = get_ops_covered_by_opinfos()
     result = set({})
-    for name, op in get_covered_ops(overridable_outplace_we_care_about).items():
+    for op in get_covered_ops(overridable_outplace_we_care_about).values():
         opinfos = op_to_opinfo[op]
         for opinfo in opinfos:
             for decorator in opinfo.decorators:
2 changes: 1 addition & 1 deletion test/inductor/test_kernel_benchmark.py
@@ -27,7 +27,7 @@ def setUp(self):

     def get_compiled_module(self):
         compiled_module = None
-        for k, v in PyCodeCache.cache.items():
+        for v in PyCodeCache.cache.values():
             if hasattr(v, "benchmark_compiled_module"):
                 self.assertTrue(
                     compiled_module is None, "Found multiple compiled modules"
2 changes: 1 addition & 1 deletion test/inductor/test_triton_wrapper.py
@@ -12,7 +12,7 @@
 class TestTritonWrapper(TestCase):
     def get_compiled_module(self):
         compiled_module = None
-        for k, v in PyCodeCache.cache.items():
+        for v in PyCodeCache.cache.values():
             if hasattr(v, "benchmark_compiled_module"):
                 self.assertTrue(
                     compiled_module is None, "Found multiple compiled modules"
2 changes: 1 addition & 1 deletion test/jit/fixtures_srcs/test_upgrader_models_generation.py
@@ -7,7 +7,7 @@

 class TestUpgraderModelGeneration(TestCase):
     def test_all_modules(self):
-        for a_module, expect_operator in ALL_MODULES.items():
+        for a_module in ALL_MODULES.keys():
             module_name = type(a_module).__name__
             self.assertTrue(
                 isinstance(a_module, torch.nn.Module),
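
Note the variant above: when it is the value that goes unused, the autofix targets .keys() instead. Iterating the dict directly would behave identically; the fix conservatively keeps the explicit .keys() call. A sketch:

modules = {"conv": 1, "linear": 2}

for name in modules.keys():  # what the autofix produces
    print(name)

for name in modules:  # equivalent: iterating a dict yields its keys
    print(name)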
2 changes: 1 addition & 1 deletion test/jit/test_list_dict.py
@@ -1964,7 +1964,7 @@ def __init__(self, configs):
             self.configs = configs

         def forward(self, x):
-            for _id, config in self.configs.items():
+            for config in self.configs.values():
                 x += config.size
             return x

2 changes: 1 addition & 1 deletion test/jit/test_tracer.py
@@ -2383,7 +2383,7 @@ def __init__(self):

     def forward(self, feature_map: Dict[str, List[Tensor]]) -> Tensor:
         output = []
-        for i, j in feature_map.items():
+        for j in feature_map.values():
             output.append(self.linear(j[0]))

         return torch.stack(output)
30 changes: 15 additions & 15 deletions test/quantization/eager/test_numeric_suite_eager.py
@@ -104,7 +104,7 @@ def compare_and_validate_results(float_model, q_model):
                 float_model.state_dict(), q_model.state_dict()
             )
             self.assertEqual(len(weight_dict), 1)
-            for k, v in weight_dict.items():
+            for v in weight_dict.values():
                 self.assertTrue(v["float"].shape == v["quantized"].shape)

         model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
@@ -126,7 +126,7 @@ def compare_and_validate_results(float_model, q_model):
                 float_model.state_dict(), q_model.state_dict()
             )
             self.assertEqual(len(weight_dict), 1)
-            for k, v in weight_dict.items():
+            for v in weight_dict.values():
                 self.assertTrue(v["float"].shape == v["quantized"].shape)

         model_list = [AnnotatedSingleLayerLinearModel(qengine)]
@@ -148,7 +148,7 @@ def compare_and_validate_results(float_model, q_model):
                 float_model.state_dict(), q_model.state_dict()
             )
             self.assertEqual(len(weight_dict), 1)
-            for k, v in weight_dict.items():
+            for v in weight_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -172,7 +172,7 @@ def compare_and_validate_results(float_model, q_model):
                 float_model.state_dict(), q_model.state_dict()
             )
             self.assertEqual(len(weight_dict), 1)
-            for k, v in weight_dict.items():
+            for v in weight_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -194,7 +194,7 @@ def test_compare_model_stub_conv_static(self):
         def compare_and_validate_results(float_model, q_model, module_swap_list, data):
             ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
             self.assertEqual(len(ob_dict), 1)
-            for k, v in ob_dict.items():
+            for v in ob_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -221,7 +221,7 @@ def test_compare_model_stub_linear_static(self):
         def compare_and_validate_results(float_model, q_model, module_swap_list, data):
             ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
             self.assertEqual(len(ob_dict), 1)
-            for k, v in ob_dict.items():
+            for v in ob_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -246,7 +246,7 @@ def test_compare_model_stub_partial(self):
         def compare_and_validate_results(float_model, q_model, module_swap_list, data):
             ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
             self.assertEqual(len(ob_dict), 1)
-            for k, v in ob_dict.items():
+            for v in ob_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -301,7 +301,7 @@ def test_compare_model_stub_functional_static(self):
         self.assertTrue(isinstance(q_model.myadd_relu, Shadow))
         self.assertTrue(isinstance(q_model.my_scalar_add, Shadow))
         self.assertTrue(isinstance(q_model.my_scalar_mul, Shadow))
-        for k, v in ob_dict.items():
+        for v in ob_dict.values():
            self.assertTrue(len(v["float"]) == len(v["quantized"]))
            for i, val in enumerate(v["quantized"]):
                self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -315,7 +315,7 @@ def test_compare_model_stub_linear_dynamic(self):
         def compare_and_validate_results(float_model, q_model, module_swap_list, data):
             ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
             self.assertEqual(len(ob_dict), 1)
-            for k, v in ob_dict.items():
+            for v in ob_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -344,7 +344,7 @@ def compare_and_validate_results(
                 float_model, q_model, module_swap_list, input, hidden
             )
             self.assertEqual(len(ob_dict), 1)
-            for k, v in ob_dict.items():
+            for v in ob_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -375,7 +375,7 @@ def compare_and_validate_results(float_model, q_model, data):
             expected_act_compare_dict_keys = {"conv.stats", "quant.stats"}

             self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
-            for k, v in act_compare_dict.items():
+            for v in act_compare_dict.values():
                 self.assertTrue(v["float"][0].shape == v["quantized"][0].shape)

         model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
@@ -398,7 +398,7 @@ def compare_and_validate_results(float_model, q_model, data):
             expected_act_compare_dict_keys = {"fc1.quant.stats", "fc1.module.stats"}

             self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
-            for k, v in act_compare_dict.items():
+            for v in act_compare_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -434,7 +434,7 @@ def test_compare_model_outputs_functional_static(self):
             "quant.stats",
         }
         self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
-        for k, v in act_compare_dict.items():
+        for v in act_compare_dict.values():
             self.assertTrue(len(v["float"]) == len(v["quantized"]))
             for i, val in enumerate(v["quantized"]):
                 self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -451,7 +451,7 @@ def compare_and_validate_results(float_model, q_model, data):
             expected_act_compare_dict_keys = {"fc1.stats"}

             self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
-            for k, v in act_compare_dict.items():
+            for v in act_compare_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@@ -480,7 +480,7 @@ def compare_and_validate_results(float_model, q_model, input, hidden):
             expected_act_compare_dict_keys = {"lstm.stats"}

             self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
-            for k, v in act_compare_dict.items():
+            for v in act_compare_dict.values():
                 self.assertTrue(len(v["float"]) == len(v["quantized"]))
                 for i, val in enumerate(v["quantized"]):
                     self.assertTrue(len(v["float"][i]) == len(v["quantized"][i]))
4 changes: 2 additions & 2 deletions test/quantization/fx/test_numeric_suite_fx.py
@@ -637,7 +637,7 @@ def test_op_relationship_mapping(self):
         # 4. go through the ops mapped to each QuantizeHandler type, and verify
         # correctness.
         def _op_in_base_sets_of_related_ops(op):
-            for name, ops in base_name_to_sets_of_related_ops.items():
+            for ops in base_name_to_sets_of_related_ops.values():
                 if op in ops:
                     return True
             return False
@@ -1829,7 +1829,7 @@ def test_extend_logger_results_with_comparison(self):
             results, 'fp32', 'int8', compute_cosine_similarity,
             'cosine_similarity_int8_vs_fp32')

-        for layer_name, layer_results in results.items():
+        for layer_results in results.values():
             assert 'sqnr_int8_vs_fp32' in \
                 layer_results['weight']['int8'][0].keys()
             assert 'l2_error_int8_vs_fp32' in \
8 changes: 4 additions & 4 deletions test/run_test.py
@@ -846,7 +846,7 @@ def run_doctests(test_module, test_directory, options):
     if enabled["qengine"] == "auto":
         try:
             # Is there a better check if quantization is enabled?
-            import torch.ao.nn.quantized as nnq  # NOQA
+            import torch.ao.nn.quantized as nnq  # NOQA: F401

             torch.backends.quantized.engine = "qnnpack"
             torch.backends.quantized.engine = "fbgemm"
@@ -857,9 +857,9 @@

     if enabled["onnx"] == "auto":
         try:
-            import onnx  # NOQA
-            import onnxruntime  # NOQA
-            import onnxscript  # NOQA
+            import onnx  # NOQA: F401
+            import onnxruntime  # NOQA: F401
+            import onnxscript  # NOQA: F401
         except ImportError:
             exclude_module_list.append("torch.onnx.*")
             enabled["onnx"] = False
6 changes: 3 additions & 3 deletions test/test_dispatch.py
@@ -782,11 +782,11 @@ def test_find_dangling_impls_ext(self):
         impls = C._dispatch_find_dangling_impls()
         self.assertEqual(1, len(impls))
         self.assertEqual(
-            '''\
+            f'''\
 name: __test::foo
 schema: (none)
-CPU: registered at {}:5 :: () -> () [ boxed unboxed ]
-'''.format(extension_path),
+CPU: registered at {extension_path}:5 :: () -> () [ boxed unboxed ]
+''',
             impls[0])

     def test_dispatch_print_registrations_for_dispatch_key_invalid(self):
4 changes: 2 additions & 2 deletions test/test_fx.py
@@ -3490,7 +3490,7 @@ def f_sum(x):

         def f_sum_dict(x):
             out = 0
-            for k, v in x.items():
+            for v in x.values():
                 out += v
             return out

@@ -4302,7 +4302,7 @@ def _get_functional(cls):
             try:
                 sig = inspect.signature(fn)
                 has_tensor_arg = False
-                for arg, param in sig.parameters.items():
+                for param in sig.parameters.values():
                     if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                         has_tensor_arg = True
                 if not has_tensor_arg:
(Diff truncated: the remaining changed files of the 88 total are not shown.)
